
[v2,5/6] mm/mm_init.c: remove unneeded calc_memmap_size()

Message ID: 20240325145646.1044760-6-bhe@redhat.com
State: Handled Elsewhere
Series: mm/mm_init.c: refactor free_area_init_core()

Commit Message

Baoquan He March 25, 2024, 2:56 p.m. UTC
Nobody calls calc_memmap_size() now.

Signed-off-by: Baoquan He <bhe@redhat.com>
---
 mm/mm_init.c | 20 --------------------
 1 file changed, 20 deletions(-)

Comments

Mike Rapoport March 27, 2024, 4:21 p.m. UTC | #1
On Mon, Mar 25, 2024 at 10:56:45PM +0800, Baoquan He wrote:
> Nobody calls calc_memmap_size() now.
> 
> Signed-off-by: Baoquan He <bhe@redhat.com>

Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>

Looks like I replied to patch 6/6 twice by mistake and missed this one.

Baoquan He March 28, 2024, 1:24 a.m. UTC | #2
On 03/27/24 at 06:21pm, Mike Rapoport wrote:
> On Mon, Mar 25, 2024 at 10:56:45PM +0800, Baoquan He wrote:
> > Nobody calls calc_memmap_size() now.
> > 
> > Signed-off-by: Baoquan He <bhe@redhat.com>
> 
> Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
> 
> Looks like I replied to patch 6/6 twice by mistake and missed this one.

Thanks for your careful review.


Patch

diff --git a/mm/mm_init.c b/mm/mm_init.c
index 7f71e56e83f3..e269a724f70e 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1331,26 +1331,6 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat,
 	pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
 }
 
-static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
-						unsigned long present_pages)
-{
-	unsigned long pages = spanned_pages;
-
-	/*
-	 * Provide a more accurate estimation if there are holes within
-	 * the zone and SPARSEMEM is in use. If there are holes within the
-	 * zone, each populated memory region may cost us one or two extra
-	 * memmap pages due to alignment because memmap pages for each
-	 * populated regions may not be naturally aligned on page boundary.
-	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
-	 */
-	if (spanned_pages > present_pages + (present_pages >> 4) &&
-	    IS_ENABLED(CONFIG_SPARSEMEM))
-		pages = present_pages;
-
-	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
-}
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void pgdat_init_split_queue(struct pglist_data *pgdat)
 {
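
For reference, below is a minimal, self-contained user-space sketch of the estimate the removed helper performed. It is an illustration, not kernel code: STRUCT_PAGE_SIZE stands in for sizeof(struct page) (assumed 64 bytes here), PAGE_SIZE is assumed to be 4 KiB, SPARSEMEM is taken as enabled (the kernel code checked IS_ENABLED(CONFIG_SPARSEMEM)), and the zone numbers in main() are made up.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define STRUCT_PAGE_SIZE	64UL	/* assumed sizeof(struct page) */

/* User-space re-creation of the removed heuristic, SPARSEMEM assumed. */
static unsigned long calc_memmap_size(unsigned long spanned_pages,
				      unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Holes cost at most one or two extra memmap pages per populated
	 * region, so once the holes exceed roughly 1/16 of present_pages,
	 * present_pages becomes the better basis for the estimate.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4))
		pages = present_pages;

	/* Bytes of memmap needed, rounded up to whole pages. */
	return PAGE_ALIGN(pages * STRUCT_PAGE_SIZE) >> PAGE_SHIFT;
}

int main(void)
{
	/* Hypothetical zone: 1 GiB spanned, 768 MiB present (4 KiB pages). */
	unsigned long spanned = 262144, present = 196608;

	printf("memmap pages: %lu\n", calc_memmap_size(spanned, present));
	return 0;
}

With these sample numbers the heuristic triggers (the 65536 pages of holes exceed present_pages >> 4 = 12288), so the memmap is sized for the 768 MiB actually present, giving 3072 memmap pages (12 MiB), rather than 4096 pages for the full 1 GiB span.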