diff mbox

[01/35] Introduce new type-based pool allocator.

Message ID 83d59ba92a3c4b3ba831ebc2fce325f0416848d0.1432735040.git.mliska@suse.cz
State New
Headers show

Commit Message

Martin Liška May 27, 2015, 1:56 p.m. UTC
Hello.

The following patch set attempts to replace the old-style pool allocator
with a type-based one. Moreover, as we utilize classes and structs that are used
just by a pool allocator, these types have overridden ctors and dtors.
Thus, using the allocator is much easier and we needn't cast types
back and forth. Another benefit can be achieved in the future, as we will
be able to call a class constructor to correctly register the location
where memory is allocated (-fgather-detailed-mem-stats).

The patch can bootstrap on x86_64-linux-gnu and ppc64-linux-gnu and
survives regression tests on x86_64-linux-gnu.

Ready for trunk?
Thanks,
Martin

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* alloc-pool.c (struct alloc_pool_descriptor): Move definition
	to header file.
	* alloc-pool.h (pool_allocator::pool_allocator): New function.
	(pool_allocator::release): Likewise.
	(inline pool_allocator::release_if_empty): Likewise.
	(inline pool_allocator::~pool_allocator): Likewise.
	(pool_allocator::allocate): Likewise.
	(pool_allocator::remove): Likewise.
---
 gcc/alloc-pool.c |  33 +-----
 gcc/alloc-pool.h | 350 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 355 insertions(+), 28 deletions(-)

Comments

Jeff Law May 27, 2015, 5:44 p.m. UTC | #1
On 05/27/2015 07:56 AM, mliska wrote:
> Hello.
>
> Following patch set attempts to replace old-style pool allocator
> to a type-based one. Moreover, as we utilize  classes and structs that are used
> just by a pool allocator, these types have overwritten ctors and dtors.
> Thus, using the allocator is much easier and we shouldn't cast types
> back and forth. Another beneficat can be achieved in future, as we will
> be able to call a class constructors to correctly register a location,
> where a memory is allocated (-fgather-detailed-mem-stats).
>
> Patch can boostrap on x86_64-linux-gnu and ppc64-linux-gnu and
> survives regression tests on x86_64-linux-gnu.
>
> Ready for trunk?
> Thanks,
> Martin
>
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* alloc-pool.c (struct alloc_pool_descriptor): Move definition
> 	to header file.
> 	* alloc-pool.h (pool_allocator::pool_allocator): New function.
> 	(pool_allocator::release): Likewise.
> 	(inline pool_allocator::release_if_empty): Likewise.
> 	(inline pool_allocator::~pool_allocator): Likewise.
> 	(pool_allocator::allocate): Likewise.
> 	(pool_allocator::remove): Likewise.
So on a general note, I don't like changing the size of the structure 
based on ENABLE_CHECKING.  If we've got other cases where we do this, 
then I guess it's OK, but if not, I'd prefer not to start doing so.


> ---

> +
> +  /* Align X to 8.  */
> +  size_t align_eight (size_t x)
> +  {
> +    return (((x+7) >> 3) << 3);
> +  }
> +
> +  const char *m_name;
> +#ifdef ENABLE_CHECKING
> +  ALLOC_POOL_ID_TYPE m_id;
> +#endif
> +  size_t m_elts_per_block;
> +
> +  /* These are the elements that have been allocated at least once and freed.  */
> +  allocation_pool_list *m_returned_free_list;
> +
> +  /* These are the elements that have not yet been allocated out of
> +     the last block obtained from XNEWVEC.  */
> +  char* m_virgin_free_list;
> +
> +  /* The number of elements in the virgin_free_list that can be
> +     allocated before needing another block.  */
> +  size_t m_virgin_elts_remaining;
> +  size_t m_elts_allocated;
> +  size_t m_elts_free;
> +  size_t m_blocks_allocated;
> +  allocation_pool_list *m_block_list;
> +  size_t m_block_size;
> +  size_t m_elt_size;
Several fields aren't documented.  They're largely self-explanatory, so 
I won't insist you document those trailing fields.  Your call whether or 
not to add docs for them.


> +
> +  /* Now align the size to a multiple of 4.  */
> +  size = align_eight (size);
Why not just aligned to 4, rather than a multiple of 4?  Presumably the 
extra 4 bytes don't matter in practice?

> +
> +template <typename T>
> +void
> +inline pool_allocator<T>::release_if_empty ()
> +{
> +  if (m_elts_free == m_elts_allocated)
> +    release ();
> +}
Is the release_if_empty all that useful in practice?

So the big issue in my mind continues to be the additional element in 
the structure when ENABLE_CHECKING is on.  As mentioned earlier, if 
we're already doing this elsewhere, then I won't object.  If we aren't, 
then I don't want to start doing so now.

The rest of the stuff are just minor questions, but nothing which would 
in my mind stop this from going forward.

Presumably your testing was with the whole series and they can't go in 
piecemeal, right?


jeff
diff mbox

Patch

diff --git a/gcc/alloc-pool.c b/gcc/alloc-pool.c
index 81909d8..0bea7a6 100644
--- a/gcc/alloc-pool.c
+++ b/gcc/alloc-pool.c
@@ -25,6 +25,8 @@  along with GCC; see the file COPYING3.  If not see
 #include "hash-table.h"
 #include "hash-map.h"
 
+ALLOC_POOL_ID_TYPE last_id;
+
 #define align_eight(x) (((x+7) >> 3) << 3)
 
 /* The internal allocation object.  */
@@ -58,36 +60,10 @@  typedef struct allocation_object_def
 #define USER_PTR_FROM_ALLOCATION_OBJECT_PTR(X)				\
    ((void *) (((allocation_object *) (X))->u.data))
 
-#ifdef ENABLE_CHECKING
-/* Last used ID.  */
-static ALLOC_POOL_ID_TYPE last_id;
-#endif
-
-/* Store information about each particular alloc_pool.  Note that this
-   will underestimate the amount the amount of storage used by a small amount:
-   1) The overhead in a pool is not accounted for.
-   2) The unallocated elements in a block are not accounted for.  Note
-   that this can at worst case be one element smaller that the block
-   size for that pool.  */
-struct alloc_pool_descriptor
-{
-  /* Number of pools allocated.  */
-  unsigned long created;
-  /* Gross allocated storage.  */
-  unsigned long allocated;
-  /* Amount of currently active storage. */
-  unsigned long current;
-  /* Peak amount of storage used.  */
-  unsigned long peak;
-  /* Size of element in the pool.  */
-  int elt_size;
-};
-
 /* Hashtable mapping alloc_pool names to descriptors.  */
-static hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
+hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
 
-/* For given name, return descriptor, create new if needed.  */
-static struct alloc_pool_descriptor *
+struct alloc_pool_descriptor *
 allocate_pool_descriptor (const char *name)
 {
   if (!alloc_pool_hash)
@@ -96,6 +72,7 @@  allocate_pool_descriptor (const char *name)
   return &alloc_pool_hash->get_or_insert (name);
 }
 
+
 /* Create a pool of things of size SIZE, with NUM in each block we
    allocate.  */
 
diff --git a/gcc/alloc-pool.h b/gcc/alloc-pool.h
index 0c30711..8fd664f 100644
--- a/gcc/alloc-pool.h
+++ b/gcc/alloc-pool.h
@@ -20,6 +20,8 @@  along with GCC; see the file COPYING3.  If not see
 #ifndef ALLOC_POOL_H
 #define ALLOC_POOL_H
 
+#include "hash-map.h"
+
 typedef unsigned long ALLOC_POOL_ID_TYPE;
 
 typedef struct alloc_pool_list_def
@@ -63,4 +65,352 @@  extern void free_alloc_pool_if_empty (alloc_pool *);
 extern void *pool_alloc (alloc_pool) ATTRIBUTE_MALLOC;
 extern void pool_free (alloc_pool, void *);
 extern void dump_alloc_pool_statistics (void);
+
+typedef unsigned long ALLOC_POOL_ID_TYPE;
+
+/* Type based memory pool allocator.  */
+template <typename T>
+class pool_allocator
+{
+public:
+  /* Default constructor for pool allocator called NAME. Each block
+     has NUM elements. The allocator support EXTRA_SIZE and can
+     potentially IGNORE_TYPE_SIZE.  */
+  pool_allocator (const char *name, size_t num, size_t extra_size = 0,
+		  bool ignore_type_size = false);
+
+  /* Default destuctor.  */
+  ~pool_allocator ();
+
+  /* Release internal data structures.  */
+  void release ();
+
+  /* Release internal data structures if the pool has not allocated
+     an object.  */
+  void release_if_empty ();
+
+  /* Allocate a new object.  */
+  T *allocate () ATTRIBUTE_MALLOC;
+
+  /* Release OBJECT that must come from the pool.  */
+  void remove (T *object);
+
+private:
+  struct allocation_pool_list
+  {
+    allocation_pool_list *next;
+  };
+
+  template <typename U>
+  struct allocation_object
+  {
+#ifdef ENABLE_CHECKING
+    /* The ID of alloc pool which the object was allocated from.  */
+    ALLOC_POOL_ID_TYPE id;
+#endif
+
+    union
+      {
+	/* The data of the object.  */
+	char data[1];
+
+	/* Because we want any type of data to be well aligned after the ID,
+	   the following elements are here.  They are never accessed so
+	   the allocated object may be even smaller than this structure.
+	   We do not care about alignment for floating-point types.  */
+	char *align_p;
+	int64_t align_i;
+      } u;
+
+    static inline allocation_object<U> *get_instance (void *data_ptr)
+    {
+      return (allocation_object<U> *)(((char *)(data_ptr)) - offsetof (allocation_object<U>, u.data));
+    }
+
+    static inline U *get_data (void *instance_ptr)
+    {
+      return (U*)(((allocation_object<U> *) instance_ptr)->u.data);
+    }
+  };
+
+  /* Align X to 8.  */
+  size_t align_eight (size_t x)
+  {
+    return (((x+7) >> 3) << 3);
+  }
+
+  const char *m_name;
+#ifdef ENABLE_CHECKING
+  ALLOC_POOL_ID_TYPE m_id;
+#endif
+  size_t m_elts_per_block;
+
+  /* These are the elements that have been allocated at least once and freed.  */
+  allocation_pool_list *m_returned_free_list;
+
+  /* These are the elements that have not yet been allocated out of
+     the last block obtained from XNEWVEC.  */
+  char* m_virgin_free_list;
+
+  /* The number of elements in the virgin_free_list that can be
+     allocated before needing another block.  */
+  size_t m_virgin_elts_remaining;
+  size_t m_elts_allocated;
+  size_t m_elts_free;
+  size_t m_blocks_allocated;
+  allocation_pool_list *m_block_list;
+  size_t m_block_size;
+  size_t m_elt_size;
+};
+
+#ifdef ENABLE_CHECKING
+/* Last used ID.  */
+extern ALLOC_POOL_ID_TYPE last_id;
+#endif
+
+/* Store information about each particular alloc_pool.  Note that this
+   will underestimate the amount the amount of storage used by a small amount:
+   1) The overhead in a pool is not accounted for.
+   2) The unallocated elements in a block are not accounted for.  Note
+   that this can at worst case be one element smaller that the block
+   size for that pool.  */
+struct alloc_pool_descriptor
+{
+  /* Number of pools allocated.  */
+  unsigned long created;
+  /* Gross allocated storage.  */
+  unsigned long allocated;
+  /* Amount of currently active storage. */
+  unsigned long current;
+  /* Peak amount of storage used.  */
+  unsigned long peak;
+  /* Size of element in the pool.  */
+  int elt_size;
+};
+
+
+/* Hashtable mapping alloc_pool names to descriptors.  */
+extern hash_map<const char *, alloc_pool_descriptor> *alloc_pool_hash;
+
+/* For given name, return descriptor, create new if needed.  */
+alloc_pool_descriptor *
+allocate_pool_descriptor (const char *name);
+
+template <typename T>
+inline
+pool_allocator<T>::pool_allocator (const char *name, size_t num,
+				   size_t extra_size, bool ignore_type_size):
+  m_name (name), m_elts_per_block (num), m_returned_free_list (NULL),
+  m_virgin_free_list (NULL), m_virgin_elts_remaining (0), m_elts_allocated (0),
+  m_elts_free (0), m_blocks_allocated (0), m_block_list (NULL)
+{
+  size_t header_size;
+  size_t size = (ignore_type_size ? 0 : sizeof (T)) + extra_size;
+
+  gcc_checking_assert (m_name);
+
+  /* Make size large enough to store the list header.  */
+  if (size < sizeof (allocation_pool_list*))
+    size = sizeof (allocation_pool_list*);
+
+  /* Now align the size to a multiple of 4.  */
+  size = align_eight (size);
+
+#ifdef ENABLE_CHECKING
+  /* Add the aligned size of ID.  */
+  size += offsetof (allocation_object<T>, u.data);
+#endif
+
+  /* Um, we can't really allocate 0 elements per block.  */
+  gcc_checking_assert (m_elts_per_block);
+
+  m_elt_size = size;
+
+  if (GATHER_STATISTICS)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+      desc->elt_size = size;
+      desc->created++;
+    }
+
+  /* List header size should be a multiple of 8.  */
+  header_size = align_eight (sizeof (allocation_pool_list));
+
+  m_block_size = (size * m_elts_per_block) + header_size;
+
+#ifdef ENABLE_CHECKING
+  /* Increase the last used ID and use it for this pool.
+     ID == 0 is used for free elements of pool so skip it.  */
+  last_id++;
+  if (last_id == 0)
+    last_id++;
+
+  m_id = last_id;
+#endif
+}
+
+/* Free all memory allocated for the given memory pool.  */
+template <typename T>
+inline void
+pool_allocator<T>::release ()
+{
+  allocation_pool_list *block, *next_block;
+
+  /* Free each block allocated to the pool.  */
+  for (block = m_block_list; block != NULL; block = next_block)
+    {
+      next_block = block->next;
+      free (block);
+    }
+
+  if (GATHER_STATISTICS && false)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+      desc->current -= (m_elts_allocated - m_elts_free) * m_elt_size;
+    }
+
+  m_returned_free_list = NULL;
+  m_virgin_free_list = NULL;
+  m_virgin_elts_remaining = 0;
+  m_elts_allocated = 0;
+  m_elts_free = 0;
+  m_blocks_allocated = 0;
+  m_block_list = NULL;
+}
+
+template <typename T>
+void
+inline pool_allocator<T>::release_if_empty ()
+{
+  if (m_elts_free == m_elts_allocated)
+    release ();
+}
+
+template <typename T>
+inline pool_allocator<T>::~pool_allocator()
+{
+  release ();
+}
+
+/* Allocates one element from the pool specified.  */
+template <typename T>
+inline T *
+pool_allocator<T>::allocate ()
+{
+  allocation_pool_list *header;
+#ifdef ENABLE_VALGRIND_ANNOTATIONS
+  int size;
+#endif
+
+  if (GATHER_STATISTICS)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+
+      desc->allocated += m_elt_size;
+      desc->current += m_elt_size;
+      if (desc->peak < desc->current)
+	desc->peak = desc->current;
+    }
+
+#ifdef ENABLE_VALGRIND_ANNOTATIONS
+  size = m_elt_size - offsetof (allocation_object<T>, u.data);
+#endif
+
+  /* If there are no more free elements, make some more!.  */
+  if (!m_returned_free_list)
+    {
+      char *block;
+      if (!m_virgin_elts_remaining)
+	{
+	  allocation_pool_list *block_header;
+
+	  /* Make the block.  */
+	  block = XNEWVEC (char, m_block_size);
+	  block_header = (allocation_pool_list*) block;
+	  block += align_eight (sizeof (allocation_pool_list));
+
+	  /* Throw it on the block list.  */
+	  block_header->next = m_block_list;
+	  m_block_list = block_header;
+
+	  /* Make the block available for allocation.  */
+	  m_virgin_free_list = block;
+	  m_virgin_elts_remaining = m_elts_per_block;
+
+	  /* Also update the number of elements we have free/allocated, and
+	     increment the allocated block count.  */
+	  m_elts_allocated += m_elts_per_block;
+	  m_elts_free += m_elts_per_block;
+	  m_blocks_allocated += 1;
+	}
+
+      /* We now know that we can take the first elt off the virgin list and
+	 put it on the returned list. */
+      block = m_virgin_free_list;
+      header = (allocation_pool_list*) allocation_object<T>::get_data (block);
+      header->next = NULL;
+#ifdef ENABLE_CHECKING
+      /* Mark the element to be free.  */
+      ((allocation_object<T> *) block)->id = 0;
+#endif
+      VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (header,size));
+      m_returned_free_list = header;
+      m_virgin_free_list += m_elt_size;
+      m_virgin_elts_remaining--;
+
+    }
+
+  /* Pull the first free element from the free list, and return it.  */
+  header = m_returned_free_list;
+  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_DEFINED (header, sizeof (*header)));
+  m_returned_free_list = header->next;
+  m_elts_free--;
+
+#ifdef ENABLE_CHECKING
+  /* Set the ID for element.  */
+  allocation_object<T>::get_instance (header)->id = m_id;
+#endif
+  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (header, size));
+
+  /* Call default constructor.  */
+  return (T *)(header);
+}
+
+/* Puts PTR back on POOL's free list.  */
+template <typename T>
+void
+pool_allocator<T>::remove (T *object)
+{
+  allocation_pool_list *header;
+#if defined(ENABLE_VALGRIND_ANNOTATIONS) || defined(ENABLE_CHECKING)
+  int size;
+  size = m_elt_size - offsetof (allocation_object<T>, u.data);
+#endif
+
+#ifdef ENABLE_CHECKING
+  gcc_assert (object
+	      /* Check if we free more than we allocated, which is Bad (TM).  */
+	      && m_elts_free < m_elts_allocated
+	      /* Check whether the PTR was allocated from POOL.  */
+	      && m_id == allocation_object<T>::get_instance (object)->id);
+
+  memset (object, 0xaf, size);
+
+  /* Mark the element to be free.  */
+  allocation_object<T>::get_instance (object)->id = 0;
+#endif
+
+  header = (allocation_pool_list*) object;
+  header->next = m_returned_free_list;
+  m_returned_free_list = header;
+  VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
+  m_elts_free++;
+
+  if (GATHER_STATISTICS)
+    {
+      alloc_pool_descriptor *desc = allocate_pool_descriptor (m_name);
+      desc->current -= m_elt_size;
+    }
+}
+
 #endif