
[4/5] ext4: use correct criteria name instead of stale integer number in comment

Message ID 20240326213823.528302-5-shikemeng@huaweicloud.com
State New
Series Minor improvements and cleanups to ext4 mballoc

Commit Message

Kemeng Shi March 26, 2024, 9:38 p.m. UTC
Use correct criteria name instead of stale integer number in comment

Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
---
 fs/ext4/ext4.h    | 15 ++++++++++++---
 fs/ext4/mballoc.c | 14 ++++++++------
 fs/ext4/mballoc.h |  4 ++--
 3 files changed, 22 insertions(+), 11 deletions(-)
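
For context, this series swaps the stale numeric cr levels in comments for the named constants of enum criteria in fs/ext4/ext4.h. A sketch of the mapping implied by the hunks below (the exact enum body in the target tree may differ slightly; CR_ANY_FREE in particular is listed from memory):

enum criteria {
	CR_POWER2_ALIGNED,	/* was cr = 0 */
	CR_GOAL_LEN_FAST,	/* was cr = 1 */
	CR_BEST_AVAIL_LEN,	/* was cr = 1.5 */
	CR_GOAL_LEN_SLOW,	/* was cr = 2 */
	CR_ANY_FREE,		/* was cr = 3 */
	EXT4_MB_NUM_CRS
};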

Comments

Ojaswin Mujoo March 29, 2024, 7:15 a.m. UTC | #1
On Wed, Mar 27, 2024 at 05:38:22AM +0800, Kemeng Shi wrote:
> Use correct criteria name instead of stale integer number in comment
> 
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
> ---
>  fs/ext4/ext4.h    | 15 ++++++++++++---
>  fs/ext4/mballoc.c | 14 ++++++++------
>  fs/ext4/mballoc.h |  4 ++--
>  3 files changed, 22 insertions(+), 11 deletions(-)
> 

Thanks for the cleanup! Feel free to add:

Reviewed-by: Ojaswin Mujoo <ojaswin@linux.ibm.com>

Jan Kara April 4, 2024, 2:19 p.m. UTC | #2
On Wed 27-03-24 05:38:22, Kemeng Shi wrote:
> Use correct criteria name instead of stale integer number in comment
> 
> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>

Looks good. But since the symbolic names already have the CR prefix, we
probably don't have to write e.g.:

/* Large fragment size list lookup succeeded at least once for cr =
 * CR_POWER2_ALIGNED */

But we can write directly:

/* Large fragment size list lookup succeeded at least once for
 * CR_POWER2_ALIGNED */

								Honza
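
Applied to the first flag, Jan's suggested style would make the next revision read, for example:

/* Large fragment size list lookup succeeded at least once for
 * CR_POWER2_ALIGNED */
#define EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED		0x8000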

Kemeng Shi April 7, 2024, 3:21 a.m. UTC | #3
On 4/4/2024 10:19 PM, Jan Kara wrote:
> On Wed 27-03-24 05:38:22, Kemeng Shi wrote:
>> Use correct criteria name instead of stale integer number in comment
>>
>> Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
> 
> Looks good. But since the symbolic names already have the CR prefix, we
> probably don't have to write e.g.:
> 
> /* Large fragment size list lookup succeeded at least once for cr =
>  * CR_POWER2_ALIGNED */
> 
> But we can write directly:
> 
> /* Large fragment size list lookup succeeded at least once for
>  * CR_POWER2_ALIGNED */
Sure, will do it in next version. Thanks.

Kemeng

Patch

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 023571f8dd1b..9b90013c59a3 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -213,11 +213,20 @@  enum criteria {
 #define EXT4_MB_USE_RESERVED		0x2000
 /* Do strict check for free blocks while retrying block allocation */
 #define EXT4_MB_STRICT_CHECK		0x4000
-/* Large fragment size list lookup succeeded at least once for cr = 0 */
+/*
+ * Large fragment size list lookup succeeded at least once for cr =
+ * CR_POWER2_ALIGNED
+ */
 #define EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED		0x8000
-/* Avg fragment size rb tree lookup succeeded at least once for cr = 1 */
+/*
+ * Avg fragment size rb tree lookup succeeded at least once for cr =
+ * CR_GOAL_LEN_FAST
+ */
 #define EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED		0x00010000
-/* Avg fragment size rb tree lookup succeeded at least once for cr = 1.5 */
+/*
+ * Avg fragment size rb tree lookup succeeded at least once for cr =
+ * CR_BEST_AVAIL_LEN
+ */
 #define EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED		0x00020000
 
 struct ext4_allocation_request {
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 62d468379722..0f8a34513bf6 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1131,8 +1131,9 @@  static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
 		ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
 	} else {
 		/*
-		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
-		 * bb_free. But until that happens, we should never come here.
+		 * TODO: For CR=CR_GOAL_LEN_SLOW, we can arrange groups in an
+		 * rb tree sorted by bb_free. But until that happens, we should
+		 * never come here.
 		 */
 		WARN_ON(1);
 	}
@@ -3444,10 +3445,11 @@  static int ext4_mb_init_backend(struct super_block *sb)
 	}
 	if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
 		sbi->s_mb_prefetch = ext4_get_groups_count(sb);
-	/* now many real IOs to prefetch within a single allocation at cr=0
-	 * given cr=0 is an CPU-related optimization we shouldn't try to
-	 * load too many groups, at some point we should start to use what
-	 * we've got in memory.
+	/*
+	 * now many real IOs to prefetch within a single allocation at
+	 * cr=CR_POWER2_ALIGNED. Given cr=CR_POWER2_ALIGNED is an CPU-related
+	 * optimization we shouldn't try to load too many groups, at some point
+	 * we should start to use what we've got in memory.
 	 * with an average random access time 5ms, it'd take a second to get
 	 * 200 groups (* N with flex_bg), so let's make this limit 4
 	 */
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 56938532b4ce..042437d8860f 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -187,8 +187,8 @@  struct ext4_allocation_context {
 	struct ext4_free_extent ac_f_ex;
 
 	/*
-	 * goal len can change in CR1.5, so save the original len. This is
-	 * used while adjusting the PA window and for accounting.
+	 * goal len can change in CR_BEST_AVAIL_LEN, so save the original len.
+	 * This is used while adjusting the PA window and for accounting.
 	 */
 	ext4_grpblk_t	ac_orig_goal_len;
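
For reference, the TODO touched in the mballoc.c hunk lives in the criteria dispatch of ext4_mb_choose_next_group(). Only the best_avail call is visible in the hunk context; the p2_aligned and goal_fast helper names below are assumed from the same tree, so take this as a sketch of the shape rather than verbatim code:

	if (*new_cr == CR_POWER2_ALIGNED) {
		ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group);
	} else if (*new_cr == CR_GOAL_LEN_FAST) {
		ext4_mb_choose_next_group_goal_fast(ac, new_cr, group);
	} else if (*new_cr == CR_BEST_AVAIL_LEN) {
		ext4_mb_choose_next_group_best_avail(ac, new_cr, group);
	} else {
		/* CR_GOAL_LEN_SLOW and later have no lookup structure yet */
		WARN_ON(1);
	}

So only the first three criteria have an optimized group-selection structure; the slower criteria fall back to linear group scanning, hence the TODO. As for the prefetch comment in the same file: at roughly 5 ms per random group read, 200 reads already cost a full second (times N with flex_bg), which is why the comment keeps the prefetch limit at 4.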