
Use single shared memory block pool for all pool allocators

Message ID 55BF291A.2010405@gmail.com
State New

Commit Message

Mikhail Maltsev Aug. 3, 2015, 8:40 a.m. UTC
On Jul 26, 2015, at 11:50 AM, Andi Kleen <andi@firstfloor.org> wrote:
> I've been compiling gcc with tcmalloc to do a similar speedup. It would be
> interesting to compare that to your patch.
I repeated the test with TCMalloc and jemalloc. TCMalloc shows nice results,
though it required some tweaks: this allocator has a threshold block size of
32 KB; larger blocks are allocated from the global heap rather than the thread
cache (and that operation is expensive), so the original patch performs worse
when used with TCMalloc. To fix this, I reduced the block size to 8 KB.
The tables below have five columns for each value: the pristine version, the
pristine version with TCMalloc (with the difference in parentheses), and the
patched version with TCMalloc (the difference is again relative to the
pristine version). The same layout is used for memory usage.

400.perlbench        26.86  26.17 (  -2.57%)  26.17 (  -2.57%) user
                      0.56   0.64 ( +14.29%)   0.61 (  +8.93%) sys
                     27.45  26.84 (  -2.22%)  26.81 (  -2.33%) real
401.bzip2             2.53    2.5 (  -1.19%)   2.48 (  -1.98%) user
                      0.07   0.09 ( +28.57%)    0.1 ( +42.86%) sys
                      2.61    2.6 (  -0.38%)   2.59 (  -0.77%) real
403.gcc              73.59  72.62 (  -1.32%)  71.72 (  -2.54%) user
                      1.59   1.88 ( +18.24%)   1.88 ( +18.24%) sys
                     75.27  74.58 (  -0.92%)  73.67 (  -2.13%) real
429.mcf                0.4   0.41 (  +2.50%)    0.4 (  +0.00%) user
                      0.03   0.05 ( +66.67%)   0.05 ( +66.67%) sys
                      0.44   0.47 (  +6.82%)   0.47 (  +6.82%) real
433.milc              3.22   3.24 (  +0.62%)   3.25 (  +0.93%) user
                      0.22   0.32 ( +45.45%)    0.3 ( +36.36%) sys
                      3.48   3.59 (  +3.16%)   3.59 (  +3.16%) real
444.namd              7.54   7.41 (  -1.72%)   7.37 (  -2.25%) user
                       0.1   0.15 ( +50.00%)   0.15 ( +50.00%) sys
                      7.66   7.58 (  -1.04%)   7.54 (  -1.57%) real
445.gobmk            20.24  19.59 (  -3.21%)   19.6 (  -3.16%) user
                      0.52   0.67 ( +28.85%)   0.59 ( +13.46%) sys
                      20.8  20.29 (  -2.45%)  20.23 (  -2.74%) real
450.soplex           19.08  18.47 (  -3.20%)  18.51 (  -2.99%) user
                      0.87   1.11 ( +27.59%)   1.06 ( +21.84%) sys
                     19.99  19.62 (  -1.85%)   19.6 (  -1.95%) real
453.povray           42.27  41.42 (  -2.01%)  41.32 (  -2.25%) user
                      2.71   3.11 ( +14.76%)   3.09 ( +14.02%) sys
                     45.04  44.58 (  -1.02%)  44.47 (  -1.27%) real
456.hmmer             7.27   7.22 (  -0.69%)   7.15 (  -1.65%) user
                      0.31   0.36 ( +16.13%)   0.39 ( +25.81%) sys
                      7.61   7.61 (  +0.00%)   7.57 (  -0.53%) real
458.sjeng             3.22   3.14 (  -2.48%)   3.15 (  -2.17%) user
                      0.09   0.16 ( +77.78%)   0.14 ( +55.56%) sys
                      3.32   3.32 (  +0.00%)    3.3 (  -0.60%) real
462.libquantum        0.86   0.87 (  +1.16%)   0.85 (  -1.16%) user
                      0.05   0.08 ( +60.00%)   0.08 ( +60.00%) sys
                      0.92   0.96 (  +4.35%)   0.94 (  +2.17%) real
464.h264ref          27.62  27.27 (  -1.27%)  27.16 (  -1.67%) user
                      0.63   0.73 ( +15.87%)   0.75 ( +19.05%) sys
                     28.28  28.03 (  -0.88%)  27.95 (  -1.17%) real
470.lbm               0.27   0.27 (  +0.00%)   0.27 (  +0.00%) user
                      0.01   0.01 (  +0.00%)   0.01 (  +0.00%) sys
                      0.29   0.29 (  +0.00%)   0.29 (  +0.00%) real
471.omnetpp          28.29  27.63 (  -2.33%)  27.54 (  -2.65%) user
                       1.5   1.57 (  +4.67%)   1.62 (  +8.00%) sys
                     29.84  29.25 (  -1.98%)  29.21 (  -2.11%) real
473.astar             1.14   1.12 (  -1.75%)   1.11 (  -2.63%) user
                      0.05   0.07 ( +40.00%)   0.09 ( +80.00%) sys
                      1.21   1.21 (  +0.00%)    1.2 (  -0.83%) real
482.sphinx3           4.65   4.57 (  -1.72%)   4.59 (  -1.29%) user
                       0.2    0.3 ( +50.00%)   0.26 ( +30.00%) sys
                      4.88   4.89 (  +0.20%)   4.88 (  +0.00%) real
483.xalancbmk        284.5  276.4 (  -2.85%) 276.48 (  -2.82%) user
                     20.29  23.03 ( +13.50%)  22.82 ( +12.47%) sys
                    305.19 299.79 (  -1.77%) 299.67 (  -1.81%) real

400.perlbench     102308kB    123004kB  (  +20696kB)    116104kB  (  +13796kB)
401.bzip2          74628kB     86936kB  (  +12308kB)     84316kB  (   +9688kB)
403.gcc           190284kB    218180kB  (  +27896kB)    212480kB  (  +22196kB)
429.mcf            19804kB     24464kB  (   +4660kB)     24320kB  (   +4516kB)
433.milc           36940kB     45308kB  (   +8368kB)     44652kB  (   +7712kB)
444.namd          183548kB    193856kB  (  +10308kB)    192632kB  (   +9084kB)
445.gobmk          73724kB     78792kB  (   +5068kB)     79192kB  (   +5468kB)
450.soplex         62076kB     67596kB  (   +5520kB)     66856kB  (   +4780kB)
453.povray        180620kB    208480kB  (  +27860kB)    207576kB  (  +26956kB)
456.hmmer          39544kB     47380kB  (   +7836kB)     46776kB  (   +7232kB)
458.sjeng          40144kB     48652kB  (   +8508kB)     47608kB  (   +7464kB)
462.libquantum     23464kB     28576kB  (   +5112kB)     28260kB  (   +4796kB)
464.h264ref       708760kB    738400kB  (  +29640kB)    734224kB  (  +25464kB)
470.lbm            26552kB     31684kB  (   +5132kB)     31348kB  (   +4796kB)
471.omnetpp       152000kB    172924kB  (  +20924kB)    167204kB  (  +15204kB)
473.astar          27036kB     31472kB  (   +4436kB)     31380kB  (   +4344kB)
482.sphinx3        33100kB     40812kB  (   +7712kB)     39496kB  (   +6396kB)
483.xalancbmk     368844kB    393292kB  (  +24448kB)    393032kB  (  +24188kB)
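
For reference, the block-size tweak mentioned above is just a change to one
constant in the new memory_block_pool class (the attached patch keeps the
64 KB default; 8 KB was used only for the TCMalloc runs):

  /* In class memory_block_pool (alloc-pool.h).  Blocks have a fixed size;
     8 KB stays below TCMalloc's 32 KB thread-cache threshold.  */
  static const size_t block_size = 8 * 1024;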


jemalloc causes a regression (which is rather surprising, because my previous
tests showed the opposite result; those tests, however, used a very small
workload, in fact a single file).

On 07/27/2015 12:13 PM, Richard Biener wrote:
>>> On Jul 26, 2015, at 11:50 AM, Andi Kleen <andi@firstfloor.org> wrote:
>>> Another useful optimization is to adjust the allocation size to be >=
>>> 2MB. Then modern Linux kernels often can give you a large page,
>>> which cuts down TLB overhead. I did similar changes some time
>>> ago for the garbage collector.
>>
>> Unless you are running with 64k pages which I do all the time on my armv8 system.
> 
> This can be a host configurable value of course.
Yes, I actually mentioned that among the possible enhancements. I think the
code from ggc-page.c can be reused (it already implements querying the page
size from the OS).
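
For illustration only, querying the host page size could look roughly like
this (a minimal sketch assuming a POSIX host and hypothetical names; it is
not part of the patch, and ggc-page.c does the equivalent via getpagesize ()):

  #include <unistd.h>

  /* Round a requested block size up to a power-of-two multiple of the
     host page size.  */
  static size_t
  host_aligned_block_size (size_t min_size)
  {
    /* 4 KB on most hosts, 64 KB on some ARMv8 configurations.  */
    long page = sysconf (_SC_PAGESIZE);
    if (page <= 0)
      page = 4096;		/* Conservative fallback.  */

    size_t size = (size_t) page;
    while (size < min_size)
      size *= 2;
    return size;
  }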

> But first of all (without looking at the patch but just reading the
> description) this
> sounds like a good idea.  Maybe still allow pools to use their own backing if
> the object size is larger than the block size of the caching pool?
Yes, I thought about it, but I was not sure whether it should be implemented
in advance. I have attached the updated patch.
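
For reference, the updated patch also carries a (currently #if 0-ed out)
custom_block_allocator template; a pool whose objects are larger than
memory_block_pool::block_size could provide its own backing roughly like this
(taken from the example comment in alloc-pool.h, not enabled anywhere):

  typedef custom_block_allocator <128*1024> huge_block_allocator;

  static base_pool_allocator <huge_block_allocator>
					value_pool ("value", 16384);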

Comments

Mikhail Maltsev Aug. 31, 2015, 6:17 a.m. UTC | #1
Ping.


Patch

diff --git a/gcc/alloc-pool.c b/gcc/alloc-pool.c
index 7e25915..f8c1351 100644
--- a/gcc/alloc-pool.c
+++ b/gcc/alloc-pool.c
@@ -35,3 +35,25 @@  dump_alloc_pool_statistics (void)
 
   pool_allocator_usage.dump (ALLOC_POOL_ORIGIN);
 }
+
+/* Global singleton-like instance.  */
+memory_block_pool memory_block_pool::instance;
+
+memory_block_pool::memory_block_pool () : m_blocks (NULL) {}
+
+memory_block_pool::~memory_block_pool ()
+{
+  release ();
+}
+
+/* Free all memory allocated by memory_block_pool.  */
+void
+memory_block_pool::release ()
+{
+  while (m_blocks)
+    {
+      block_list *next = m_blocks->m_next;
+      XDELETEVEC (m_blocks);
+      m_blocks = next;
+    }
+}
diff --git a/gcc/alloc-pool.h b/gcc/alloc-pool.h
index 03bde63..dccc41a 100644
--- a/gcc/alloc-pool.h
+++ b/gcc/alloc-pool.h
@@ -95,18 +95,102 @@  struct pool_usage: public mem_usage
 
 extern mem_alloc_description<pool_usage> pool_allocator_usage;
 
+/* Shared pool which allows other memory pools to reuse each others' allocated
+   memory blocks instead of calling free/malloc again.  */
+class memory_block_pool
+{
+public:
+  /* Blocks have fixed size.  This is necessary for sharing.  */
+  static const size_t block_size = 64 * 1024;
+
+  memory_block_pool ();
+  ~memory_block_pool ();
+
+  static inline void *allocate () ATTRIBUTE_MALLOC;
+  static inline void remove (void *);
+  void release ();
+
+private:
+  /* memory_block_pool singleton instance, defined in alloc-pool.c.  */
+  static memory_block_pool instance;
+
+  struct block_list
+  {
+    block_list *m_next;
+  };
+
+  /* Free list.  */
+  block_list *m_blocks;
+};
+
+/* Allocate single block.  Reuse previously returned block, if possible.  */
+inline void *
+memory_block_pool::allocate ()
+{
+  if (instance.m_blocks == NULL)
+    return XNEWVEC (char, block_size);
+
+  void *result = instance.m_blocks;
+  instance.m_blocks = instance.m_blocks->m_next;
+  return result;
+}
+
+/* Return UNCAST_BLOCK to pool.  */
+inline void
+memory_block_pool::remove (void *uncast_block)
+{
+  block_list *block = reinterpret_cast<block_list *> (uncast_block);
+  block->m_next = instance.m_blocks;
+  instance.m_blocks = block;
+}
+
+#if 0
+/* If a pool with custom block size is needed, one might use the following
+   template.  An instance of this template can be used as a parameter for
+   instantiating base_pool_allocator template:
+
+	typedef custom_block_allocator <128*1024> huge_block_allocator;
+	...
+	static base_pool_allocator <huge_block_allocator>
+						value_pool ("value", 16384);
+
+   Right now it's not used anywhere in the code, and is given here as an
+   example).  */
+
+template <size_t BlockSize>
+class custom_block_allocator
+{
+public:
+  static const size_t block_size = BlockSize;
+
+  static inline void *
+  allocate () ATTRIBUTE_MALLOC
+  {
+    return XNEWVEC (char, BlockSize);
+  }
+
+  static inline void
+  remove (void *block)
+  {
+    XDELETEVEC (block);
+  }
+};
+#endif
+
 /* Generic pool allocator.  */
-class pool_allocator
+
+template <typename TBlockAllocator>
+class base_pool_allocator
 {
 public:
-  /* Default constructor for pool allocator called NAME.  Each block
-     has NUM elements.  */
-  pool_allocator (const char *name, size_t num, size_t size CXX_MEM_STAT_INFO);
-  ~pool_allocator ();
+  /* Default constructor for pool allocator called NAME.  */
+  base_pool_allocator (const char *name, size_t size CXX_MEM_STAT_INFO);
+  ~base_pool_allocator ();
   void release ();
   void release_if_empty ();
   void *allocate () ATTRIBUTE_MALLOC;
   void remove (void *object);
+  size_t num_elts_current ();
 
 private:
   struct allocation_pool_list
@@ -151,7 +235,7 @@  private:
   };
 
   /* Align X to 8.  */
-  size_t
+  static inline size_t
   align_eight (size_t x)
   {
     return (((x+7) >> 3) << 3);
@@ -180,8 +264,6 @@  private:
   size_t m_blocks_allocated;
   /* List of blocks that are used to allocate new objects.  */
   allocation_pool_list *m_block_list;
-  /* The number of elements in a block.  */
-  size_t m_block_size;
   /* Size of a pool elements in bytes.  */
   size_t m_elt_size;
   /* Size in bytes that should be allocated for each element.  */
@@ -192,24 +274,24 @@  private:
   mem_location m_location;
 };
 
+template <typename TBlockAllocator>
 inline
-pool_allocator::pool_allocator (const char *name, size_t num,
-				size_t size MEM_STAT_DECL):
-  m_name (name), m_id (0), m_elts_per_block (num), m_returned_free_list (NULL),
+base_pool_allocator <TBlockAllocator>::base_pool_allocator (
+				const char *name, size_t size MEM_STAT_DECL):
+  m_name (name), m_id (0), m_elts_per_block (0), m_returned_free_list (NULL),
   m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
-  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL),
-  m_block_size (0), m_size (size), m_initialized (false),
-  m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
+  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL), m_size (size),
+  m_initialized (false), m_location (ALLOC_POOL_ORIGIN, false PASS_MEM_STAT) {}
 
 /* Initialize a pool allocator.  */
 
+template <typename TBlockAllocator>
 inline void
-pool_allocator::initialize ()
+base_pool_allocator <TBlockAllocator>::initialize ()
 {
   gcc_checking_assert (!m_initialized);
   m_initialized = true;
 
-  size_t header_size;
   size_t size = m_size;
 
   gcc_checking_assert (m_name);
@@ -218,15 +300,12 @@  pool_allocator::initialize ()
   if (size < sizeof (allocation_pool_list*))
     size = sizeof (allocation_pool_list*);
 
-  /* Now align the size to a multiple of 4.  */
+  /* Now align the size to a multiple of 8.  */
   size = align_eight (size);
 
   /* Add the aligned size of ID.  */
   size += offsetof (allocation_object, u.data);
 
-  /* Um, we can't really allocate 0 elements per block.  */
-  gcc_checking_assert (m_elts_per_block);
-
   m_elt_size = size;
 
   if (GATHER_STATISTICS)
@@ -239,9 +318,10 @@  pool_allocator::initialize ()
     }
 
   /* List header size should be a multiple of 8.  */
-  header_size = align_eight (sizeof (allocation_pool_list));
+  size_t header_size = align_eight (sizeof (allocation_pool_list));
 
-  m_block_size = (size * m_elts_per_block) + header_size;
+  m_elts_per_block = (TBlockAllocator::block_size - header_size) / size;
+  gcc_checking_assert (m_elts_per_block != 0);
 
 #ifdef ENABLE_CHECKING
   /* Increase the last used ID and use it for this pool.
@@ -255,8 +335,9 @@  pool_allocator::initialize ()
 }
 
 /* Free all memory allocated for the given memory pool.  */
+template <typename TBlockAllocator>
 inline void
-pool_allocator::release ()
+base_pool_allocator <TBlockAllocator>::release ()
 {
   if (!m_initialized)
     return;
@@ -267,7 +348,7 @@  pool_allocator::release ()
   for (block = m_block_list; block != NULL; block = next_block)
     {
       next_block = block->next;
-      free (block);
+      TBlockAllocator::remove (block);
     }
 
   if (GATHER_STATISTICS)
@@ -285,21 +366,24 @@  pool_allocator::release ()
   m_block_list = NULL;
 }
 
-void
-inline pool_allocator::release_if_empty ()
+template <typename TBlockAllocator>
+inline void
+base_pool_allocator <TBlockAllocator>::release_if_empty ()
 {
   if (m_elts_free == m_elts_allocated)
     release ();
 }
 
-inline pool_allocator::~pool_allocator ()
+template <typename TBlockAllocator>
+inline base_pool_allocator <TBlockAllocator>::~base_pool_allocator ()
 {
   release ();
 }
 
 /* Allocates one element from the pool specified.  */
+template <typename TBlockAllocator>
 inline void*
-pool_allocator::allocate ()
+base_pool_allocator <TBlockAllocator>::allocate ()
 {
   if (!m_initialized)
     initialize ();
@@ -327,7 +411,7 @@  pool_allocator::allocate ()
 	  allocation_pool_list *block_header;
 
 	  /* Make the block.  */
-	  block = XNEWVEC (char, m_block_size);
+	  block = reinterpret_cast<char *> (TBlockAllocator::allocate ());
 	  block_header = (allocation_pool_list*) block;
 	  block += align_eight (sizeof (allocation_pool_list));
 
@@ -378,8 +462,9 @@  pool_allocator::allocate ()
 }
 
 /* Puts PTR back on POOL's free list.  */
+template <typename TBlockAllocator>
 inline void
-pool_allocator::remove (void *object)
+base_pool_allocator <TBlockAllocator>::remove (void *object)
 {
   gcc_checking_assert (m_initialized);
 
@@ -412,15 +497,28 @@  pool_allocator::remove (void *object)
     }
 }
 
+/* Number of elements currently active (not returned to pool).  Used for cheap
+   consistency checks.  */
+template <typename TBlockAllocator>
+inline size_t
+base_pool_allocator <TBlockAllocator>::num_elts_current ()
+{
+  return m_elts_allocated - m_elts_free;
+}
+
+/* Specialization of base_pool_allocator which should be used in most cases.
+   Another specialization may be needed, if object size is greater than
+   memory_block_pool::block_size (64 KB).  */
+typedef base_pool_allocator <memory_block_pool> pool_allocator;
+
 /* Type based memory pool allocator.  */
 template <typename T>
 class object_allocator
 {
 public:
-  /* Default constructor for pool allocator called NAME.  Each block
-     has NUM elements.  */
-  object_allocator (const char *name, size_t num CXX_MEM_STAT_INFO):
-    m_allocator (name, num, sizeof (T) PASS_MEM_STAT) {}
+  /* Default constructor for pool allocator called NAME.  */
+  object_allocator (const char *name CXX_MEM_STAT_INFO):
+    m_allocator (name, sizeof (T) PASS_MEM_STAT) {}
 
   inline void
   release ()
@@ -448,6 +546,12 @@  public:
     m_allocator.remove (object);
   }
 
+  inline size_t
+  num_elts_current ()
+  {
+    return m_allocator.num_elts_current ();
+  }
+
 private:
   pool_allocator m_allocator;
 };
diff --git a/gcc/asan.c b/gcc/asan.c
index 4f5adaa..7c243cd 100644
--- a/gcc/asan.c
+++ b/gcc/asan.c
@@ -350,7 +350,7 @@  struct asan_mem_ref
   HOST_WIDE_INT access_size;
 };
 
-object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref", 10);
+object_allocator <asan_mem_ref> asan_mem_ref_pool ("asan_mem_ref");
 
 /* Initializes an instance of asan_mem_ref.  */
 
diff --git a/gcc/c-family/c-format.c b/gcc/c-family/c-format.c
index 4bc3147..d92a4fd 100644
--- a/gcc/c-family/c-format.c
+++ b/gcc/c-family/c-format.c
@@ -1687,8 +1687,7 @@  check_format_arg (void *ctx, tree format_tree,
      will decrement it if it finds there are extra arguments, but this way
      need not adjust it for every return.  */
   res->number_other++;
-  object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool",
-						  10);
+  object_allocator <format_wanted_type> fwt_pool ("format_wanted_type pool");
   check_format_info_main (res, info, format_chars, format_length,
 			  params, arg_num, fwt_pool);
 }
diff --git a/gcc/cfg.c b/gcc/cfg.c
index c998492..2bc7857 100644
--- a/gcc/cfg.c
+++ b/gcc/cfg.c
@@ -1052,7 +1052,7 @@  void
 initialize_original_copy_tables (void)
 {
   original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
-    ("original_copy", 10);
+    ("original_copy");
   bb_original = new hash_table<bb_copy_hasher> (10);
   bb_copy = new hash_table<bb_copy_hasher> (10);
   loop_copy = new hash_table<bb_copy_hasher> (10);
diff --git a/gcc/cselib.c b/gcc/cselib.c
index 2149959..4264394 100644
--- a/gcc/cselib.c
+++ b/gcc/cselib.c
@@ -246,11 +246,11 @@  static unsigned int cfa_base_preserved_regno = INVALID_REGNUM;
    each time memory is invalidated.  */
 static cselib_val *first_containing_mem = &dummy_val;
 
-static object_allocator<elt_list> elt_list_pool ("elt_list", 10);
-static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list", 10);
-static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list", 10);
+static object_allocator<elt_list> elt_list_pool ("elt_list");
+static object_allocator<elt_loc_list> elt_loc_list_pool ("elt_loc_list");
+static object_allocator<cselib_val> cselib_val_pool ("cselib_val_list");
 
-static pool_allocator value_pool ("value", 100, RTX_CODE_SIZE (VALUE));
+static pool_allocator value_pool ("value", RTX_CODE_SIZE (VALUE));
 
 /* If nonnull, cselib will call this function before freeing useless
    VALUEs.  A VALUE is deemed useless if its "locs" field is null.  */
diff --git a/gcc/df-problems.c b/gcc/df-problems.c
index d4b5d76..0ab533f 100644
--- a/gcc/df-problems.c
+++ b/gcc/df-problems.c
@@ -1997,8 +1997,7 @@  static void
 df_chain_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
 {
   df_chain_remove_problem ();
-  df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool",
-						      50);
+  df_chain->block_pool = new object_allocator<df_link> ("df_chain_block pool");
   df_chain->optional_p = true;
 }
 
diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index 93c2eae..7f6bf6f 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -133,8 +133,6 @@  static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
    it gets run.  It also has no need for the iterative solver.
 ----------------------------------------------------------------------------*/
 
-#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
-
 /* Problem data for the scanning dataflow function.  */
 struct df_scan_problem_data
 {
@@ -253,17 +251,17 @@  df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
   df_scan->computed = true;
 
   problem_data->ref_base_pool = new object_allocator<df_base_ref>
-    ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+    ("df_scan ref base");
   problem_data->ref_artificial_pool = new object_allocator<df_artificial_ref>
-    ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+    ("df_scan ref artificial");
   problem_data->ref_regular_pool = new object_allocator<df_regular_ref>
-    ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+    ("df_scan ref regular");
   problem_data->insn_pool = new object_allocator<df_insn_info>
-    ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+    ("df_scan insn");
   problem_data->reg_pool = new object_allocator<df_reg_info>
-    ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+    ("df_scan reg");
   problem_data->mw_reg_pool = new object_allocator<df_mw_hardreg>
-    ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
+    ("df_scan mw_reg");
 
   bitmap_obstack_initialize (&problem_data->reg_bitmaps);
   bitmap_obstack_initialize (&problem_data->insn_bitmaps);
diff --git a/gcc/dse.c b/gcc/dse.c
index ff26fc0..d34cb48 100644
--- a/gcc/dse.c
+++ b/gcc/dse.c
@@ -308,11 +308,9 @@  lowpart_bitmask (int n)
 }
 
 typedef struct store_info *store_info_t;
-static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool",
-						       100);
+static object_allocator<store_info> cse_store_info_pool ("cse_store_info_pool");
 
-static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool",
-						       100);
+static object_allocator<store_info> rtx_store_info_pool ("rtx_store_info_pool");
 
 /* This structure holds information about a load.  These are only
    built for rtx bases.  */
@@ -337,8 +335,7 @@  struct read_info_type
 };
 typedef struct read_info_type *read_info_t;
 
-static object_allocator<read_info_type> read_info_type_pool
-  ("read_info_pool", 100);
+static object_allocator<read_info_type> read_info_type_pool ("read_info_pool");
 
 /* One of these records is created for each insn.  */
 
@@ -427,8 +424,7 @@  struct insn_info_type
 };
 typedef struct insn_info_type *insn_info_t;
 
-static object_allocator<insn_info_type> insn_info_type_pool
-  ("insn_info_pool", 100);
+static object_allocator<insn_info_type> insn_info_type_pool ("insn_info_pool");
 
 /* The linked list of stores that are under consideration in this
    basic block.  */
@@ -495,7 +491,7 @@  struct dse_bb_info_type
 typedef struct dse_bb_info_type *bb_info_t;
 
 static object_allocator<dse_bb_info_type> dse_bb_info_type_pool
-  ("bb_info_pool", 100);
+  ("bb_info_pool");
 
 /* Table to hold all bb_infos.  */
 static bb_info_t *bb_table;
@@ -567,8 +563,7 @@  struct group_info
 typedef struct group_info *group_info_t;
 typedef const struct group_info *const_group_info_t;
 
-static object_allocator<group_info> group_info_pool
-  ("rtx_group_info_pool", 100);
+static object_allocator<group_info> group_info_pool ("rtx_group_info_pool");
 
 /* Index into the rtx_group_vec.  */
 static int rtx_group_next_id;
@@ -594,7 +589,7 @@  struct deferred_change
 typedef struct deferred_change *deferred_change_t;
 
 static object_allocator<deferred_change> deferred_change_pool
-  ("deferred_change_pool", 10);
+  ("deferred_change_pool");
 
 static deferred_change_t deferred_change_list = NULL;
 
diff --git a/gcc/et-forest.c b/gcc/et-forest.c
index 1931285..4f919d4 100644
--- a/gcc/et-forest.c
+++ b/gcc/et-forest.c
@@ -54,8 +54,8 @@  struct et_occ
 				   depth.  */
 };
 
-static object_allocator<et_node> et_nodes ("et_nodes pool", 300);
-static object_allocator<et_occ> et_occurrences ("et_occ pool", 300);
+static object_allocator<et_node> et_nodes ("et_nodes pool");
+static object_allocator<et_occ> et_occurrences ("et_occ pool");
 
 /* Changes depth of OCC to D.  */
 
diff --git a/gcc/ipa-cp.c b/gcc/ipa-cp.c
index 8de7e56..69a181d 100644
--- a/gcc/ipa-cp.c
+++ b/gcc/ipa-cp.c
@@ -276,16 +276,16 @@  public:
 /* Allocation pools for values and their sources in ipa-cp.  */
 
 object_allocator<ipcp_value<tree> > ipcp_cst_values_pool
-  ("IPA-CP constant values", 32);
+  ("IPA-CP constant values");
 
 object_allocator<ipcp_value<ipa_polymorphic_call_context> >
-  ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts", 32);
+  ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts");
 
 object_allocator<ipcp_value_source<tree> > ipcp_sources_pool
-  ("IPA-CP value sources", 64);
+  ("IPA-CP value sources");
 
 object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
-  ("IPA_CP aggregate lattices", 32);
+  ("IPA_CP aggregate lattices");
 
 /* Maximal count found in program.  */
 
diff --git a/gcc/ipa-inline-analysis.c b/gcc/ipa-inline-analysis.c
index 3a8f0ec..4822329 100644
--- a/gcc/ipa-inline-analysis.c
+++ b/gcc/ipa-inline-analysis.c
@@ -143,7 +143,7 @@  vec<inline_edge_summary_t> inline_edge_summary_vec;
 vec<edge_growth_cache_entry> edge_growth_cache;
 
 /* Edge predicates goes here.  */
-static object_allocator<predicate> edge_predicate_pool ("edge predicates", 10);
+static object_allocator<predicate> edge_predicate_pool ("edge predicates");
 
 /* Return true predicate (tautology).
    We represent it by empty list of clauses.  */
diff --git a/gcc/ipa-profile.c b/gcc/ipa-profile.c
index 1b929c6..382897c 100644
--- a/gcc/ipa-profile.c
+++ b/gcc/ipa-profile.c
@@ -87,8 +87,7 @@  struct histogram_entry
    duplicate entries.  */
 
 vec<histogram_entry *> histogram;
-static object_allocator<histogram_entry> histogram_pool
-  ("IPA histogram", 10);
+static object_allocator<histogram_entry> histogram_pool ("IPA histogram");
 
 /* Hashtable support for storing SSA names hashed by their SSA_NAME_VAR.  */
 
diff --git a/gcc/ipa-prop.c b/gcc/ipa-prop.c
index c862cff..8e0f182 100644
--- a/gcc/ipa-prop.c
+++ b/gcc/ipa-prop.c
@@ -95,7 +95,7 @@  struct ipa_cst_ref_desc
 /* Allocation pool for reference descriptions.  */
 
 static object_allocator<ipa_cst_ref_desc> ipa_refdesc_pool
-  ("IPA-PROP ref descriptions", 32);
+  ("IPA-PROP ref descriptions");
 
 /* Return true if DECL_FUNCTION_SPECIFIC_OPTIMIZATION of the decl associated
    with NODE should prevent us from analyzing it for the purposes of IPA-CP.  */
diff --git a/gcc/ira-build.c b/gcc/ira-build.c
index 333dfb7..067a608 100644
--- a/gcc/ira-build.c
+++ b/gcc/ira-build.c
@@ -421,9 +421,9 @@  rebuild_regno_allocno_maps (void)
 
 
 /* Pools for allocnos, allocno live ranges and objects.  */
-static object_allocator<live_range> live_range_pool ("live ranges", 100);
-static object_allocator<ira_allocno> allocno_pool ("allocnos", 100);
-static object_allocator<ira_object> object_pool ("objects", 100);
+static object_allocator<live_range> live_range_pool ("live ranges");
+static object_allocator<ira_allocno> allocno_pool ("allocnos");
+static object_allocator<ira_object> object_pool ("objects");
 
 /* Vec containing references to all created allocnos.  It is a
    container of array allocnos.  */
@@ -1171,7 +1171,7 @@  finish_allocnos (void)
 
 
 /* Pools for allocno preferences.  */
-static object_allocator <ira_allocno_pref> pref_pool ("prefs", 100);
+static object_allocator <ira_allocno_pref> pref_pool ("prefs");
 
 /* Vec containing references to all created preferences.  It is a
    container of array ira_prefs.  */
@@ -1358,7 +1358,7 @@  finish_prefs (void)
 
 
 /* Pools for copies.  */
-static object_allocator<ira_allocno_copy> copy_pool ("copies", 100);
+static object_allocator<ira_allocno_copy> copy_pool ("copies");
 
 /* Vec containing references to all created copies.  It is a
    container of array ira_copies.  */
@@ -1631,8 +1631,7 @@  initiate_cost_vectors (void)
     {
       aclass = ira_allocno_classes[i];
       cost_vector_pool[aclass] = new pool_allocator
-	("cost vectors", 100,
-	 sizeof (int) * (ira_class_hard_regs_num[aclass]));
+	("cost vectors", sizeof (int) * (ira_class_hard_regs_num[aclass]));
     }
 }
 
diff --git a/gcc/ira-color.c b/gcc/ira-color.c
index 74d2c2e..e6533c6 100644
--- a/gcc/ira-color.c
+++ b/gcc/ira-color.c
@@ -1157,7 +1157,7 @@  setup_profitable_hard_regs (void)
 
 /* Pool for update cost records.  */
 static object_allocator<update_cost_record> update_cost_record_pool
-  ("update cost records", 100);
+  ("update cost records");
 
 /* Return new update cost record with given params.  */
 static struct update_cost_record *
diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c
index e139846..bf25740 100644
--- a/gcc/lra-lives.c
+++ b/gcc/lra-lives.c
@@ -107,8 +107,7 @@  static sparseset unused_set, dead_set;
 static bitmap_head temp_bitmap;
 
 /* Pool for pseudo live ranges.	 */
-static object_allocator<lra_live_range> lra_live_range_pool
-  ("live ranges", 100);
+static object_allocator<lra_live_range> lra_live_range_pool ("live ranges");
 
 /* Free live range list LR.  */
 static void
diff --git a/gcc/lra.c b/gcc/lra.c
index 8ced164..895483c 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -533,7 +533,7 @@  lra_update_dups (lra_insn_recog_data_t id, signed char *nops)
    insns.  */
 
 /* Pools for insn reg info.  */
-object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs", 100);
+object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs");
 
 /* Create LRA insn related info about a reference to REGNO in INSN with
    TYPE (in/out/inout), biggest reference mode MODE, flag that it is
@@ -746,7 +746,7 @@  free_insn_recog_data (lra_insn_recog_data_t data)
 }
 
 /* Pools for copies.  */
-static object_allocator<lra_copy> lra_copy_pool ("lra copies", 100);
+static object_allocator<lra_copy> lra_copy_pool ("lra copies");
 
 /* Finish LRA data about all insns.  */
 static void
diff --git a/gcc/regcprop.c b/gcc/regcprop.c
index 97433f0..6f7d01e 100644
--- a/gcc/regcprop.c
+++ b/gcc/regcprop.c
@@ -75,7 +75,7 @@  struct value_data
 };
 
 static object_allocator<queued_debug_insn_change> queued_debug_insn_change_pool
-  ("debug insn changes pool", 256);
+  ("debug insn changes pool");
 
 static bool skip_debug_insn_p;
 
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index 3ac66e8..57ff1ef 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -4058,14 +4058,10 @@  sched_deps_init (bool global_p)
 
   if (global_p)
     {
-      dl_pool = new object_allocator<_deps_list> ("deps_list",
-                                   /* Allocate lists for one block at a time.  */
-                                   insns_in_block);
-      dn_pool = new object_allocator<_dep_node> ("dep_node",
-                                   /* Allocate nodes for one block at a time.
-                                      We assume that average insn has
-                                      5 producers.  */
-                                   5 * insns_in_block);
+      dl_pool = new object_allocator<_deps_list> ("deps_list");
+				/* Allocate lists for one block at a time.  */
+      dn_pool = new object_allocator<_dep_node> ("dep_node");
+				/* Allocate nodes for one block at a time.  */
     }
 }
 
diff --git a/gcc/sel-sched-ir.c b/gcc/sel-sched-ir.c
index 9988285..8ea4dce 100644
--- a/gcc/sel-sched-ir.c
+++ b/gcc/sel-sched-ir.c
@@ -59,7 +59,7 @@  vec<sel_region_bb_info_def>
     sel_region_bb_info = vNULL;
 
 /* A pool for allocating all lists.  */
-object_allocator<_list_node> sched_lists_pool ("sel-sched-lists", 500);
+object_allocator<_list_node> sched_lists_pool ("sel-sched-lists");
 
 /* This contains information about successors for compute_av_set.  */
 struct succs_info current_succs;
diff --git a/gcc/stmt.c b/gcc/stmt.c
index 391686c..61a986d 100644
--- a/gcc/stmt.c
+++ b/gcc/stmt.c
@@ -1139,7 +1139,7 @@  expand_case (gswitch *stmt)
   struct case_node *case_list = 0;
 
   /* A pool for case nodes.  */
-  object_allocator<case_node> case_node_pool ("struct case_node pool", 100);
+  object_allocator<case_node> case_node_pool ("struct case_node pool");
 
   /* An ERROR_MARK occurs for various reasons including invalid data type.
      ??? Can this still happen, with GIMPLE and all?  */
@@ -1315,8 +1315,7 @@  expand_sjlj_dispatch_table (rtx dispatch_index,
     {
       /* Similar to expand_case, but much simpler.  */
       struct case_node *case_list = 0;
-      object_allocator<case_node> case_node_pool ("struct sjlj_case pool",
-						ncases);
+      object_allocator<case_node> case_node_pool ("struct sjlj_case pool");
       tree index_expr = make_tree (index_type, dispatch_index);
       tree minval = build_int_cst (index_type, 0);
       tree maxval = CASE_LOW (dispatch_table.last ());
diff --git a/gcc/tree-sra.c b/gcc/tree-sra.c
index 818c290..6638bc1 100644
--- a/gcc/tree-sra.c
+++ b/gcc/tree-sra.c
@@ -277,7 +277,7 @@  typedef struct access *access_p;
 
 
 /* Alloc pool for allocating access structures.  */
-static object_allocator<struct access> access_pool ("SRA accesses", 16);
+static object_allocator<struct access> access_pool ("SRA accesses");
 
 /* A structure linking lhs and rhs accesses from an aggregate assignment.  They
    are used to propagate subaccesses from rhs to lhs as long as they don't
@@ -289,7 +289,7 @@  struct assign_link
 };
 
 /* Alloc pool for allocating assign link structures.  */
-static object_allocator<assign_link> assign_link_pool ("SRA links", 16);
+static object_allocator<assign_link> assign_link_pool ("SRA links");
 
 /* Base (tree) -> Vector (vec<access_p> *) map.  */
 static hash_map<tree, auto_vec<access_p> > *base_access_vec;
diff --git a/gcc/tree-ssa-math-opts.c b/gcc/tree-ssa-math-opts.c
index eae5358..c8d0d33 100644
--- a/gcc/tree-ssa-math-opts.c
+++ b/gcc/tree-ssa-math-opts.c
@@ -547,8 +547,7 @@  pass_cse_reciprocals::execute (function *fun)
   basic_block bb;
   tree arg;
 
-  occ_pool = new object_allocator<occurrence>
-    ("dominators for recip", n_basic_blocks_for_fn (fun) / 3 + 1);
+  occ_pool = new object_allocator<occurrence> ("dominators for recip");
 
   memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
   calculate_dominance_info (CDI_DOMINATORS);
diff --git a/gcc/tree-ssa-pre.c b/gcc/tree-ssa-pre.c
index 041cb78..05c2d9c 100644
--- a/gcc/tree-ssa-pre.c
+++ b/gcc/tree-ssa-pre.c
@@ -349,7 +349,7 @@  clear_expression_ids (void)
   expressions.release ();
 }
 
-static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes", 30);
+static object_allocator<pre_expr_d> pre_expr_pool ("pre_expr nodes");
 
 /* Given an SSA_NAME NAME, get or create a pre_expr to represent it.  */
 
@@ -488,7 +488,7 @@  static unsigned int get_expr_value_id (pre_expr);
 /* We can add and remove elements and entries to and from sets
    and hash tables, so we use alloc pools for them.  */
 
-static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets", 30);
+static object_allocator<bitmap_set> bitmap_set_pool ("Bitmap sets");
 static bitmap_obstack grand_bitmap_obstack;
 
 /* Set of blocks with statements that have had their EH properties changed.  */
diff --git a/gcc/tree-ssa-reassoc.c b/gcc/tree-ssa-reassoc.c
index efb813c..51934c0 100644
--- a/gcc/tree-ssa-reassoc.c
+++ b/gcc/tree-ssa-reassoc.c
@@ -209,8 +209,8 @@  typedef struct operand_entry
   unsigned int count;
 } *operand_entry_t;
 
-static object_allocator<operand_entry> operand_entry_pool ("operand entry pool",
-							 30);
+static object_allocator<operand_entry> operand_entry_pool
+  ("operand entry pool");
 
 /* This is used to assign a unique ID to each struct operand_entry
    so that qsort results are identical on different hosts.  */
diff --git a/gcc/tree-ssa-sccvn.c b/gcc/tree-ssa-sccvn.c
index ab4b110..ba55f28 100644
--- a/gcc/tree-ssa-sccvn.c
+++ b/gcc/tree-ssa-sccvn.c
@@ -4125,9 +4125,9 @@  allocate_vn_table (vn_tables_t table)
   table->references = new vn_reference_table_type (23);
 
   gcc_obstack_init (&table->nary_obstack);
-  table->phis_pool = new object_allocator<vn_phi_s> ("VN phis", 30);
+  table->phis_pool = new object_allocator<vn_phi_s> ("VN phis");
   table->references_pool = new object_allocator<vn_reference_s>
-    ("VN references", 30);
+    ("VN references");
 }
 
 /* Free a value number table.  */
diff --git a/gcc/tree-ssa-strlen.c b/gcc/tree-ssa-strlen.c
index cfe4dd9..87f48bc 100644
--- a/gcc/tree-ssa-strlen.c
+++ b/gcc/tree-ssa-strlen.c
@@ -113,8 +113,7 @@  typedef struct strinfo_struct
 } *strinfo;
 
 /* Pool for allocating strinfo_struct entries.  */
-static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool",
-						      64);
+static object_allocator<strinfo_struct> strinfo_pool ("strinfo_struct pool");
 
 /* Vector mapping positive string indexes to strinfo, for the
    current basic block.  The first pointer in the vector is special,
diff --git a/gcc/tree-ssa-structalias.c b/gcc/tree-ssa-structalias.c
index c1f3c32..f585e3c 100644
--- a/gcc/tree-ssa-structalias.c
+++ b/gcc/tree-ssa-structalias.c
@@ -323,7 +323,7 @@  static inline bool type_can_have_subvars (const_tree);
 
 /* Pool of variable info structures.  */
 static object_allocator<variable_info> variable_info_pool
-  ("Variable info pool", 30);
+  ("Variable info pool");
 
 /* Map varinfo to final pt_solution.  */
 static hash_map<varinfo_t, pt_solution *> *final_solutions;
@@ -523,7 +523,7 @@  struct constraint
 /* List of constraints that we use to build the constraint graph from.  */
 
 static vec<constraint_t> constraints;
-static object_allocator<constraint> constraint_pool ("Constraint pool", 30);
+static object_allocator<constraint> constraint_pool ("Constraint pool");
 
 /* The constraint graph is represented as an array of bitmaps
    containing successor nodes.  */
diff --git a/gcc/var-tracking.c b/gcc/var-tracking.c
index da9de28..f1a02f9 100644
--- a/gcc/var-tracking.c
+++ b/gcc/var-tracking.c
@@ -576,28 +576,28 @@  typedef struct variable_tracking_info_def
 } *variable_tracking_info;
 
 /* Alloc pool for struct attrs_def.  */
-object_allocator<attrs_def> attrs_def_pool ("attrs_def pool", 1024);
+object_allocator<attrs_def> attrs_def_pool ("attrs_def pool");
 
 /* Alloc pool for struct variable_def with MAX_VAR_PARTS entries.  */
 
 static pool_allocator var_pool
-  ("variable_def pool", 64, sizeof (variable_def) +
+  ("variable_def pool", sizeof (variable_def) +
    (MAX_VAR_PARTS - 1) * sizeof (((variable)NULL)->var_part[0]));
 
 /* Alloc pool for struct variable_def with a single var_part entry.  */
 static pool_allocator valvar_pool
-  ("small variable_def pool", 256, sizeof (variable_def));
+  ("small variable_def pool", sizeof (variable_def));
 
 /* Alloc pool for struct location_chain_def.  */
 static object_allocator<location_chain_def> location_chain_def_pool
-  ("location_chain_def pool", 1024);
+  ("location_chain_def pool");
 
 /* Alloc pool for struct shared_hash_def.  */
 static object_allocator<shared_hash_def> shared_hash_def_pool
-  ("shared_hash_def pool", 256);
+  ("shared_hash_def pool");
 
 /* Alloc pool for struct loc_exp_dep_s for NOT_ONEPART variables.  */
-object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool", 64);
+object_allocator<loc_exp_dep> loc_exp_dep_pool ("loc_exp_dep pool");
 
 /* Changed variables, notes will be emitted for them.  */
 static variable_table_type *changed_variables;