[14/35] Change use to type-based pool allocator in df-scan.c.

Message ID 55686B7A.7080902@suse.cz
State New

Commit Message

Martin Liška May 29, 2015, 1:36 p.m. UTC
On 05/27/2015 03:56 PM, mliska wrote:
> gcc/ChangeLog:
>
> 2015-04-30  Martin Liska  <mliska@suse.cz>
>
> 	* df-scan.c (struct df_scan_problem_data): Use new type-based
> 	pool allocator.
> 	(df_scan_free_internal): Likewise.
> 	(df_scan_alloc): Likewise.
> 	(df_grow_reg_info): Likewise.
> 	(df_free_ref): Likewise.
> 	(df_insn_create_insn_record): Likewise.
> 	(df_mw_hardreg_chain_delete): Likewise.
> 	(df_insn_info_delete): Likewise.
> 	(df_free_collection_rec): Likewise.
> 	(df_mw_hardreg_chain_delete_eq_uses): Likewise.
> 	(df_sort_and_compress_mws): Likewise.
> 	(df_ref_create_structure): Likewise.
> 	(df_ref_record): Likewise.
> ---
>   gcc/df-scan.c | 94 +++++++++++++++++++++++++++++------------------------------
>   1 file changed, 46 insertions(+), 48 deletions(-)
>
> diff --git a/gcc/df-scan.c b/gcc/df-scan.c
> index e32eaf5..4646bcf 100644
> --- a/gcc/df-scan.c
> +++ b/gcc/df-scan.c
> @@ -159,15 +159,18 @@ static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
>      it gets run.  It also has no need for the iterative solver.
>   ----------------------------------------------------------------------------*/
>
> +#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
> +
>   /* Problem data for the scanning dataflow function.  */
>   struct df_scan_problem_data
>   {
> -  alloc_pool ref_base_pool;
> -  alloc_pool ref_artificial_pool;
> -  alloc_pool ref_regular_pool;
> -  alloc_pool insn_pool;
> -  alloc_pool reg_pool;
> -  alloc_pool mw_reg_pool;
> +  pool_allocator<df_base_ref> *ref_base_pool;
> +  pool_allocator<df_artificial_ref> *ref_artificial_pool;
> +  pool_allocator<df_regular_ref> *ref_regular_pool;
> +  pool_allocator<df_insn_info> *insn_pool;
> +  pool_allocator<df_reg_info> *reg_pool;
> +  pool_allocator<df_mw_hardreg> *mw_reg_pool;
> +
>     bitmap_obstack reg_bitmaps;
>     bitmap_obstack insn_bitmaps;
>   };
> @@ -218,12 +221,12 @@ df_scan_free_internal (void)
>     bitmap_clear (&df->insns_to_rescan);
>     bitmap_clear (&df->insns_to_notes_rescan);
>
> -  free_alloc_pool (problem_data->ref_base_pool);
> -  free_alloc_pool (problem_data->ref_artificial_pool);
> -  free_alloc_pool (problem_data->ref_regular_pool);
> -  free_alloc_pool (problem_data->insn_pool);
> -  free_alloc_pool (problem_data->reg_pool);
> -  free_alloc_pool (problem_data->mw_reg_pool);
> +  delete problem_data->ref_base_pool;
> +  delete problem_data->ref_artificial_pool;
> +  delete problem_data->ref_regular_pool;
> +  delete problem_data->insn_pool;
> +  delete problem_data->reg_pool;
> +  delete problem_data->mw_reg_pool;
>     bitmap_obstack_release (&problem_data->reg_bitmaps);
>     bitmap_obstack_release (&problem_data->insn_bitmaps);
>     free (df_scan->problem_data);
> @@ -264,7 +267,6 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
>   {
>     struct df_scan_problem_data *problem_data;
>     unsigned int insn_num = get_max_uid () + 1;
> -  unsigned int block_size = 512;
>     basic_block bb;
>
>     /* Given the number of pools, this is really faster than tearing
> @@ -276,24 +278,18 @@ df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
>     df_scan->problem_data = problem_data;
>     df_scan->computed = true;
>
> -  problem_data->ref_base_pool
> -    = create_alloc_pool ("df_scan ref base",
> -			 sizeof (struct df_base_ref), block_size);
> -  problem_data->ref_artificial_pool
> -    = create_alloc_pool ("df_scan ref artificial",
> -			 sizeof (struct df_artificial_ref), block_size);
> -  problem_data->ref_regular_pool
> -    = create_alloc_pool ("df_scan ref regular",
> -			 sizeof (struct df_regular_ref), block_size);
> -  problem_data->insn_pool
> -    = create_alloc_pool ("df_scan insn",
> -			 sizeof (struct df_insn_info), block_size);
> -  problem_data->reg_pool
> -    = create_alloc_pool ("df_scan reg",
> -			 sizeof (struct df_reg_info), block_size);
> -  problem_data->mw_reg_pool
> -    = create_alloc_pool ("df_scan mw_reg",
> -			 sizeof (struct df_mw_hardreg), block_size / 16);
> +  problem_data->ref_base_pool = new pool_allocator<df_base_ref>
> +    ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
> +  problem_data->ref_artificial_pool = new pool_allocator<df_artificial_ref>
> +    ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
> +  problem_data->ref_regular_pool = new pool_allocator<df_regular_ref>
> +    ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
> +  problem_data->insn_pool = new pool_allocator<df_insn_info>
> +    ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
> +  problem_data->reg_pool = new pool_allocator<df_reg_info>
> +    ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
> +  problem_data->mw_reg_pool = new pool_allocator<df_mw_hardreg>
> +    ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
>
>     bitmap_obstack_initialize (&problem_data->reg_bitmaps);
>     bitmap_obstack_initialize (&problem_data->insn_bitmaps);
> @@ -519,13 +515,14 @@ df_grow_reg_info (void)
>       {
>         struct df_reg_info *reg_info;
>
> -      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
> +      // TODO
> +      reg_info = problem_data->reg_pool->allocate ();
>         memset (reg_info, 0, sizeof (struct df_reg_info));
>         df->def_regs[i] = reg_info;
> -      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
> +      reg_info = problem_data->reg_pool->allocate ();
>         memset (reg_info, 0, sizeof (struct df_reg_info));
>         df->use_regs[i] = reg_info;
> -      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
> +      reg_info = problem_data->reg_pool->allocate ();
>         memset (reg_info, 0, sizeof (struct df_reg_info));
>         df->eq_use_regs[i] = reg_info;
>         df->def_info.begin[i] = 0;
> @@ -740,15 +737,17 @@ df_free_ref (df_ref ref)
>     switch (DF_REF_CLASS (ref))
>       {
>       case DF_REF_BASE:
> -      pool_free (problem_data->ref_base_pool, ref);
> +      problem_data->ref_base_pool->remove ((df_base_ref *) (ref));
>         break;
>
>       case DF_REF_ARTIFICIAL:
> -      pool_free (problem_data->ref_artificial_pool, ref);
> +      problem_data->ref_artificial_pool->remove
> +	((df_artificial_ref *) (ref));
>         break;
>
>       case DF_REF_REGULAR:
> -      pool_free (problem_data->ref_regular_pool, ref);
> +      problem_data->ref_regular_pool->remove
> +	((df_regular_ref *) (ref));
>         break;
>       }
>   }
> @@ -851,7 +850,7 @@ df_insn_create_insn_record (rtx_insn *insn)
>     insn_rec = DF_INSN_INFO_GET (insn);
>     if (!insn_rec)
>       {
> -      insn_rec = (struct df_insn_info *) pool_alloc (problem_data->insn_pool);
> +      insn_rec = problem_data->insn_pool->allocate ();
>         DF_INSN_INFO_SET (insn, insn_rec);
>       }
>     memset (insn_rec, 0, sizeof (struct df_insn_info));
> @@ -899,7 +898,7 @@ df_mw_hardreg_chain_delete (struct df_mw_hardreg *hardregs)
>     for (; hardregs; hardregs = next)
>       {
>         next = DF_MWS_NEXT (hardregs);
> -      pool_free (problem_data->mw_reg_pool, hardregs);
> +      problem_data->mw_reg_pool->remove (hardregs);
>       }
>   }
>
> @@ -940,7 +939,7 @@ df_insn_info_delete (unsigned int uid)
>         df_ref_chain_delete (insn_info->uses);
>         df_ref_chain_delete (insn_info->eq_uses);
>
> -      pool_free (problem_data->insn_pool, insn_info);
> +      problem_data->insn_pool->remove (insn_info);
>         DF_INSN_UID_SET (uid, NULL);
>       }
>   }
> @@ -1024,7 +1023,7 @@ df_free_collection_rec (struct df_collection_rec *collection_rec)
>     FOR_EACH_VEC_ELT (collection_rec->eq_use_vec, ix, ref)
>       df_free_ref (ref);
>     FOR_EACH_VEC_ELT (collection_rec->mw_vec, ix, mw)
> -    pool_free (problem_data->mw_reg_pool, mw);
> +    problem_data->mw_reg_pool->remove (mw);
>
>     collection_rec->def_vec.release ();
>     collection_rec->use_vec.release ();
> @@ -1949,7 +1948,7 @@ df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
>         if (mw->flags & DF_REF_IN_NOTE)
>   	{
>   	  *mw_ptr = DF_MWS_NEXT (mw);
> -	  pool_free (problem_data->mw_reg_pool, mw);
> +	  problem_data->mw_reg_pool->remove (mw);
>   	}
>         else
>   	mw_ptr = &DF_MWS_NEXT (mw);
> @@ -2296,8 +2295,7 @@ df_sort_and_compress_mws (vec<df_mw_hardreg_ptr, va_heap> *mw_vec)
>         while (i + dist + 1 < count
>   	     && df_mw_equal_p ((*mw_vec)[i], (*mw_vec)[i + dist + 1]))
>   	{
> -	  pool_free (problem_data->mw_reg_pool,
> -		     (*mw_vec)[i + dist + 1]);
> +	  problem_data->mw_reg_pool->remove ((*mw_vec)[i + dist + 1]);
>   	  dist++;
>   	}
>         /* Copy it down to the next position.  */
> @@ -2525,18 +2523,18 @@ df_ref_create_structure (enum df_ref_class cl,
>     switch (cl)
>       {
>       case DF_REF_BASE:
> -      this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
> +      this_ref = (df_ref) (problem_data->ref_base_pool->allocate ());
>         gcc_checking_assert (loc == NULL);
>         break;
>
>       case DF_REF_ARTIFICIAL:
> -      this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
> +      this_ref = (df_ref) (problem_data->ref_artificial_pool->allocate ());
>         this_ref->artificial_ref.bb = bb;
>         gcc_checking_assert (loc == NULL);
>         break;
>
>       case DF_REF_REGULAR:
> -      this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
> +      this_ref = (df_ref) (problem_data->ref_regular_pool->allocate ());
>         this_ref->regular_ref.loc = loc;
>         gcc_checking_assert (loc);
>         break;
> @@ -2638,7 +2636,7 @@ df_ref_record (enum df_ref_class cl,
>   	    ref_flags |= DF_REF_PARTIAL;
>   	  ref_flags |= DF_REF_MW_HARDREG;
>
> -	  hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
> +	  hardreg = problem_data->mw_reg_pool->allocate ();
>   	  hardreg->type = ref_type;
>   	  hardreg->flags = ref_flags;
>   	  hardreg->mw_reg = reg;
>

v2.
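
For readers not yet familiar with the new interface, below is a minimal,
self-contained sketch of the type-based pool pattern the patch switches
df-scan.c to.  It is illustrative only: the pool_allocator name, the
(name, block size) constructor arguments and the allocate ()/remove ()
calls mirror what the diff uses, but the internals (a std::vector free
list, one operator new per object) are a toy stand-in, not GCC's actual
alloc-pool implementation, and toy_reg_info is a hypothetical substitute
for df_reg_info.

#include <cstddef>
#include <vector>

/* Toy stand-in for the templated pool: one backing allocation per object,
   plus a free list so remove ()/allocate () recycle storage.  The real pool
   hands out objects from blocks of BLOCK_SIZE elements; that detail is
   skipped here to keep the sketch short.  */
template <typename T>
class pool_allocator
{
public:
  pool_allocator (const char *name, std::size_t block_size)
    : m_name (name), m_block_size (block_size) {}

  ~pool_allocator ()
  {
    for (std::size_t i = 0; i < m_storage.size (); i++)
      ::operator delete (m_storage[i]);
  }

  /* The element size comes from T, so callers pass no sizeof and get back
     a typed pointer instead of a void * that needs casting.  */
  T *allocate ()
  {
    if (!m_free_list.empty ())
      {
	T *p = m_free_list.back ();
	m_free_list.pop_back ();
	return p;
      }
    void *raw = ::operator new (sizeof (T));
    m_storage.push_back (raw);
    return static_cast<T *> (raw);
  }

  /* Give one object back to the pool for later reuse.  */
  void remove (T *object) { m_free_list.push_back (object); }

private:
  const char *m_name;		/* kept for statistics/debug dumps */
  std::size_t m_block_size;	/* unused in this toy version */
  std::vector<void *> m_storage;
  std::vector<T *> m_free_list;
};

/* Hypothetical substitute for df_reg_info, showing the shape of one
   converted call site.  */
struct toy_reg_info { int n_refs; };

int
main ()
{
  pool_allocator<toy_reg_info> reg_pool ("toy reg", 512);

  /* Old interface:
       reg_info = (struct df_reg_info *) pool_alloc (reg_pool);
     New interface, as used throughout the patch:  */
  toy_reg_info *reg_info = reg_pool.allocate ();
  reg_info->n_refs = 0;

  /* Old: pool_free (reg_pool, reg_info);  New:  */
  reg_pool.remove (reg_info);

  return 0;
}

The effect of the conversion is visible at the call sites in the diff:
the element size is carried by the template parameter, so the sizeof
arguments and the casts on pool_alloc's void * result disappear, and
tearing a pool down becomes a plain delete instead of free_alloc_pool.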

Patch

From 2168bcd7c3f63a06372d4b2de3959397cf9105af Mon Sep 17 00:00:00 2001
From: mliska <mliska@suse.cz>
Date: Wed, 27 May 2015 15:56:48 +0200
Subject: [PATCH 13/32] Change use to type-based pool allocator in df-scan.c.

gcc/ChangeLog:

2015-04-30  Martin Liska  <mliska@suse.cz>

	* df-scan.c (struct df_scan_problem_data): Use new type-based
	pool allocator.
	(df_scan_free_internal): Likewise.
	(df_scan_alloc): Likewise.
	(df_grow_reg_info): Likewise.
	(df_free_ref): Likewise.
	(df_insn_create_insn_record): Likewise.
	(df_mw_hardreg_chain_delete): Likewise.
	(df_insn_info_delete): Likewise.
	(df_free_collection_rec): Likewise.
	(df_mw_hardreg_chain_delete_eq_uses): Likewise.
	(df_sort_and_compress_mws): Likewise.
	(df_ref_create_structure): Likewise.
	(df_ref_record): Likewise.
---
 gcc/df-scan.c | 94 +++++++++++++++++++++++++++++------------------------------
 1 file changed, 46 insertions(+), 48 deletions(-)

diff --git a/gcc/df-scan.c b/gcc/df-scan.c
index e32eaf5..4646bcf 100644
--- a/gcc/df-scan.c
+++ b/gcc/df-scan.c
@@ -159,15 +159,18 @@  static const unsigned int copy_all = copy_defs | copy_uses | copy_eq_uses
    it gets run.  It also has no need for the iterative solver.
 ----------------------------------------------------------------------------*/
 
+#define SCAN_PROBLEM_DATA_BLOCK_SIZE 512
+
 /* Problem data for the scanning dataflow function.  */
 struct df_scan_problem_data
 {
-  alloc_pool ref_base_pool;
-  alloc_pool ref_artificial_pool;
-  alloc_pool ref_regular_pool;
-  alloc_pool insn_pool;
-  alloc_pool reg_pool;
-  alloc_pool mw_reg_pool;
+  pool_allocator<df_base_ref> *ref_base_pool;
+  pool_allocator<df_artificial_ref> *ref_artificial_pool;
+  pool_allocator<df_regular_ref> *ref_regular_pool;
+  pool_allocator<df_insn_info> *insn_pool;
+  pool_allocator<df_reg_info> *reg_pool;
+  pool_allocator<df_mw_hardreg> *mw_reg_pool;
+
   bitmap_obstack reg_bitmaps;
   bitmap_obstack insn_bitmaps;
 };
@@ -218,12 +221,12 @@  df_scan_free_internal (void)
   bitmap_clear (&df->insns_to_rescan);
   bitmap_clear (&df->insns_to_notes_rescan);
 
-  free_alloc_pool (problem_data->ref_base_pool);
-  free_alloc_pool (problem_data->ref_artificial_pool);
-  free_alloc_pool (problem_data->ref_regular_pool);
-  free_alloc_pool (problem_data->insn_pool);
-  free_alloc_pool (problem_data->reg_pool);
-  free_alloc_pool (problem_data->mw_reg_pool);
+  delete problem_data->ref_base_pool;
+  delete problem_data->ref_artificial_pool;
+  delete problem_data->ref_regular_pool;
+  delete problem_data->insn_pool;
+  delete problem_data->reg_pool;
+  delete problem_data->mw_reg_pool;
   bitmap_obstack_release (&problem_data->reg_bitmaps);
   bitmap_obstack_release (&problem_data->insn_bitmaps);
   free (df_scan->problem_data);
@@ -264,7 +267,6 @@  df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
 {
   struct df_scan_problem_data *problem_data;
   unsigned int insn_num = get_max_uid () + 1;
-  unsigned int block_size = 512;
   basic_block bb;
 
   /* Given the number of pools, this is really faster than tearing
@@ -276,24 +278,18 @@  df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
   df_scan->problem_data = problem_data;
   df_scan->computed = true;
 
-  problem_data->ref_base_pool
-    = create_alloc_pool ("df_scan ref base",
-			 sizeof (struct df_base_ref), block_size);
-  problem_data->ref_artificial_pool
-    = create_alloc_pool ("df_scan ref artificial",
-			 sizeof (struct df_artificial_ref), block_size);
-  problem_data->ref_regular_pool
-    = create_alloc_pool ("df_scan ref regular",
-			 sizeof (struct df_regular_ref), block_size);
-  problem_data->insn_pool
-    = create_alloc_pool ("df_scan insn",
-			 sizeof (struct df_insn_info), block_size);
-  problem_data->reg_pool
-    = create_alloc_pool ("df_scan reg",
-			 sizeof (struct df_reg_info), block_size);
-  problem_data->mw_reg_pool
-    = create_alloc_pool ("df_scan mw_reg",
-			 sizeof (struct df_mw_hardreg), block_size / 16);
+  problem_data->ref_base_pool = new pool_allocator<df_base_ref>
+    ("df_scan ref base", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->ref_artificial_pool = new pool_allocator<df_artificial_ref>
+    ("df_scan ref artificial", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->ref_regular_pool = new pool_allocator<df_regular_ref>
+    ("df_scan ref regular", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->insn_pool = new pool_allocator<df_insn_info>
+    ("df_scan insn", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->reg_pool = new pool_allocator<df_reg_info>
+    ("df_scan reg", SCAN_PROBLEM_DATA_BLOCK_SIZE);
+  problem_data->mw_reg_pool = new pool_allocator<df_mw_hardreg>
+    ("df_scan mw_reg", SCAN_PROBLEM_DATA_BLOCK_SIZE / 16);
 
   bitmap_obstack_initialize (&problem_data->reg_bitmaps);
   bitmap_obstack_initialize (&problem_data->insn_bitmaps);
@@ -519,13 +515,14 @@  df_grow_reg_info (void)
     {
       struct df_reg_info *reg_info;
 
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      // TODO
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->def_regs[i] = reg_info;
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->use_regs[i] = reg_info;
-      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
+      reg_info = problem_data->reg_pool->allocate ();
       memset (reg_info, 0, sizeof (struct df_reg_info));
       df->eq_use_regs[i] = reg_info;
       df->def_info.begin[i] = 0;
@@ -740,15 +737,17 @@  df_free_ref (df_ref ref)
   switch (DF_REF_CLASS (ref))
     {
     case DF_REF_BASE:
-      pool_free (problem_data->ref_base_pool, ref);
+      problem_data->ref_base_pool->remove ((df_base_ref *) (ref));
       break;
 
     case DF_REF_ARTIFICIAL:
-      pool_free (problem_data->ref_artificial_pool, ref);
+      problem_data->ref_artificial_pool->remove
+	((df_artificial_ref *) (ref));
       break;
 
     case DF_REF_REGULAR:
-      pool_free (problem_data->ref_regular_pool, ref);
+      problem_data->ref_regular_pool->remove
+	((df_regular_ref *) (ref));
       break;
     }
 }
@@ -851,7 +850,7 @@  df_insn_create_insn_record (rtx_insn *insn)
   insn_rec = DF_INSN_INFO_GET (insn);
   if (!insn_rec)
     {
-      insn_rec = (struct df_insn_info *) pool_alloc (problem_data->insn_pool);
+      insn_rec = problem_data->insn_pool->allocate ();
       DF_INSN_INFO_SET (insn, insn_rec);
     }
   memset (insn_rec, 0, sizeof (struct df_insn_info));
@@ -899,7 +898,7 @@  df_mw_hardreg_chain_delete (struct df_mw_hardreg *hardregs)
   for (; hardregs; hardregs = next)
     {
       next = DF_MWS_NEXT (hardregs);
-      pool_free (problem_data->mw_reg_pool, hardregs);
+      problem_data->mw_reg_pool->remove (hardregs);
     }
 }
 
@@ -940,7 +939,7 @@  df_insn_info_delete (unsigned int uid)
       df_ref_chain_delete (insn_info->uses);
       df_ref_chain_delete (insn_info->eq_uses);
 
-      pool_free (problem_data->insn_pool, insn_info);
+      problem_data->insn_pool->remove (insn_info);
       DF_INSN_UID_SET (uid, NULL);
     }
 }
@@ -1024,7 +1023,7 @@  df_free_collection_rec (struct df_collection_rec *collection_rec)
   FOR_EACH_VEC_ELT (collection_rec->eq_use_vec, ix, ref)
     df_free_ref (ref);
   FOR_EACH_VEC_ELT (collection_rec->mw_vec, ix, mw)
-    pool_free (problem_data->mw_reg_pool, mw);
+    problem_data->mw_reg_pool->remove (mw);
 
   collection_rec->def_vec.release ();
   collection_rec->use_vec.release ();
@@ -1949,7 +1948,7 @@  df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
       if (mw->flags & DF_REF_IN_NOTE)
 	{
 	  *mw_ptr = DF_MWS_NEXT (mw);
-	  pool_free (problem_data->mw_reg_pool, mw);
+	  problem_data->mw_reg_pool->remove (mw);
 	}
       else
 	mw_ptr = &DF_MWS_NEXT (mw);
@@ -2296,8 +2295,7 @@  df_sort_and_compress_mws (vec<df_mw_hardreg_ptr, va_heap> *mw_vec)
       while (i + dist + 1 < count
 	     && df_mw_equal_p ((*mw_vec)[i], (*mw_vec)[i + dist + 1]))
 	{
-	  pool_free (problem_data->mw_reg_pool,
-		     (*mw_vec)[i + dist + 1]);
+	  problem_data->mw_reg_pool->remove ((*mw_vec)[i + dist + 1]);
 	  dist++;
 	}
       /* Copy it down to the next position.  */
@@ -2525,18 +2523,18 @@  df_ref_create_structure (enum df_ref_class cl,
   switch (cl)
     {
     case DF_REF_BASE:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
+      this_ref = (df_ref) (problem_data->ref_base_pool->allocate ());
       gcc_checking_assert (loc == NULL);
       break;
 
     case DF_REF_ARTIFICIAL:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
+      this_ref = (df_ref) (problem_data->ref_artificial_pool->allocate ());
       this_ref->artificial_ref.bb = bb;
       gcc_checking_assert (loc == NULL);
       break;
 
     case DF_REF_REGULAR:
-      this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
+      this_ref = (df_ref) (problem_data->ref_regular_pool->allocate ());
       this_ref->regular_ref.loc = loc;
       gcc_checking_assert (loc);
       break;
@@ -2638,7 +2636,7 @@  df_ref_record (enum df_ref_class cl,
 	    ref_flags |= DF_REF_PARTIAL;
 	  ref_flags |= DF_REF_MW_HARDREG;
 
-	  hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
+	  hardreg = problem_data->mw_reg_pool->allocate ();
 	  hardreg->type = ref_type;
 	  hardreg->flags = ref_flags;
 	  hardreg->mw_reg = reg;
-- 
2.1.4