Patchwork fix return value for mb_cache_shrink_fn when nr_to_scan > 0

Submitter shenghui
Date July 21, 2010, 10:53 a.m.
Message ID <4C46D1C5.90200@gmail.com>
Permalink /patch/59431/
State Not Applicable

Comments

shenghui - July 21, 2010, 10:53 a.m.
Sorry. I regenerated the patch; please check it.
I wrapped most of the code in a single pair of spinlock ops for two reasons:
1) taking the spinlock twice seems time consuming
2) a single pair of spinlock ops keeps "count"
   consistent for the shrink operation; with two pairs,
   other processes may create new cache entries in between.



Signed-off-by: Wang Sheng-Hui <crosslonelyover@gmail.com>
---
 fs/mbcache.c |   24 ++++++++++++------------
 1 files changed, 12 insertions(+), 12 deletions(-)
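
For context, mb_cache_shrink_fn() implements the old two-argument shrinker
callback: called with nr_to_scan == 0 it should only report how many entries
are freeable, and with nr_to_scan > 0 it should free up to that many entries
and then report how many remain. The pre-patch code computed "count" before
doing any freeing, so for nr_to_scan > 0 it returned the stale pre-shrink
count; moving the counting loop after the freeing loop is what the subject
line refers to. A minimal sketch of that contract (the two helpers are
hypothetical, not mbcache functions; the final scaling line mirrors the one
in the patch):

	/*
	 * Sketch of the old int (*shrink)(int nr_to_scan, gfp_t gfp_mask)
	 * shrinker contract that mb_cache_shrink_fn() follows.
	 */
	static int example_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
	{
		int count;

		if (nr_to_scan > 0)
			example_free_entries(nr_to_scan, gfp_mask);	/* hypothetical helper */

		count = example_count_entries();			/* hypothetical helper */

		/* report freeable entries, scaled by vfs_cache_pressure */
		return (count / 100) * sysctl_vfs_cache_pressure;
	}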
Eric Sandeen - July 21, 2010, 2 p.m.
Wang Sheng-Hui wrote:
> Sorry. I regenerated the patch; please check it.
> I wrapped most of the code in a single pair of spinlock ops for two reasons:
> 1) taking the spinlock twice seems time consuming
> 2) a single pair of spinlock ops keeps "count"
>   consistent for the shrink operation; with two pairs,
>   other processes may create new cache entries in between.
> 

Sorry, this patch appears to have whitespace cut & paste mangling.

More comments below.

> Signed-off-by: Wang Sheng-Hui <crosslonelyover@gmail.com>
> ---
> fs/mbcache.c |   24 ++++++++++++------------
> 1 files changed, 12 insertions(+), 12 deletions(-)
> 
> diff --git a/fs/mbcache.c b/fs/mbcache.c
> index ec88ff3..ee57aa3 100644
> --- a/fs/mbcache.c
> +++ b/fs/mbcache.c
> @@ -201,21 +201,15 @@ mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
> {
>     LIST_HEAD(free_list);
>     struct list_head *l, *ltmp;
> +    struct mb_cache *cache;
>     int count = 0;
> 
> -    spin_lock(&mb_cache_spinlock);
> -    list_for_each(l, &mb_cache_list) {
> -        struct mb_cache *cache =
> -            list_entry(l, struct mb_cache, c_cache_list);
> -        mb_debug("cache %s (%d)", cache->c_name,
> -              atomic_read(&cache->c_entry_count));
> -        count += atomic_read(&cache->c_entry_count);
> -    }
>     mb_debug("trying to free %d entries", nr_to_scan);
> -    if (nr_to_scan == 0) {
> -        spin_unlock(&mb_cache_spinlock);
> +
> +    spin_lock(&mb_cache_spinlock);
> +    if (nr_to_scan == 0)
>         goto out;
> -    }
> +
>     while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
>         struct mb_cache_entry *ce =
>             list_entry(mb_cache_lru_list.next,
> @@ -223,12 +217,18 @@ mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
>         list_move_tail(&ce->e_lru_list, &free_list);
>         __mb_cache_entry_unhash(ce);
>     }
> -    spin_unlock(&mb_cache_spinlock);

you can't do this because

>     list_for_each_safe(l, ltmp, &free_list) {
>         __mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,

this takes the spinlock too, so you'll deadlock (see the sketch after the quoted patch below).

Did you test this patch?

-Eric

>                            e_lru_list), gfp_mask);
>     }
> out:
> +    list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
> +        mb_debug("cache %s (%d)", cache->c_name,
> +              atomic_read(&cache->c_entry_count));
> +        count += atomic_read(&cache->c_entry_count);
> +    }
> +    spin_unlock(&mb_cache_spinlock);
> +
>     return (count / 100) * sysctl_vfs_cache_pressure;
> }
> 
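
The locking rule behind the objection: mb_cache_spinlock is an ordinary,
non-recursive spinlock, so a function that acquires it must not be called
while the caller already holds it; the CPU would spin forever on a lock it
already owns. Per Eric's comment, __mb_cache_entry_forget() takes
mb_cache_spinlock itself, which is why the pre-patch code drops the lock
before the forget loop. A minimal sketch of the two orderings (based on the
quoted code, assuming the re-lock inside __mb_cache_entry_forget() that Eric
describes):

	/* Broken: the callee re-acquires a lock this CPU already holds -> self-deadlock */
	spin_lock(&mb_cache_spinlock);
	/* ... unhash entries and move them onto the local free_list ... */
	list_for_each_safe(l, ltmp, &free_list)
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), gfp_mask);
	spin_unlock(&mb_cache_spinlock);

	/* Safe (the pre-patch ordering): finish the list surgery under the lock,
	 * drop it, then free the entries from the private free_list */
	spin_lock(&mb_cache_spinlock);
	/* ... unhash entries and move them onto the local free_list ... */
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list)
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), gfp_mask);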

Andreas Gruenbacher - July 21, 2010, 5:57 p.m.
Al,

here is an mbcache cleanup and then a fixed version of Shenghui's minor
shrinker function fix.  The patches have survived functional testing
here.

This seems slightly too much for kernel-janitors, so could you please
take the patches?

Thanks,
Andreas

Andreas Gruenbacher (2):
  mbcache: Remove unused features
  mbcache: fix shrinker function return value

 fs/ext2/xattr.c         |   12 ++--
 fs/ext3/xattr.c         |   12 ++--
 fs/ext4/xattr.c         |   12 ++--
 fs/mbcache.c            |  168 ++++++++++++++---------------------------------
 include/linux/mbcache.h |   20 ++----
 5 files changed, 70 insertions(+), 154 deletions(-)
Al Viro - July 21, 2010, 11:22 p.m.
On Wed, Jul 21, 2010 at 07:57:20PM +0200, Andreas Gruenbacher wrote:
> Al,
> 
> here is an mbcache cleanup and then a fixed version of Shenghui's minor
> shrinker function fix.  The patches have survived functional testing
> here.
> 
> This seems slightly too much for kernel-janitors, so could you please
> take the patches?

Done.

Patch

diff --git a/fs/mbcache.c b/fs/mbcache.c
index ec88ff3..ee57aa3 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -201,21 +201,15 @@  mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
 {
 	LIST_HEAD(free_list);
 	struct list_head *l, *ltmp;
+	struct mb_cache *cache;
 	int count = 0;
 
-	spin_lock(&mb_cache_spinlock);
-	list_for_each(l, &mb_cache_list) {
-		struct mb_cache *cache =
-			list_entry(l, struct mb_cache, c_cache_list);
-		mb_debug("cache %s (%d)", cache->c_name,
-			  atomic_read(&cache->c_entry_count));
-		count += atomic_read(&cache->c_entry_count);
-	}
 	mb_debug("trying to free %d entries", nr_to_scan);
-	if (nr_to_scan == 0) {
-		spin_unlock(&mb_cache_spinlock);
+
+	spin_lock(&mb_cache_spinlock);
+	if (nr_to_scan == 0)
 		goto out;
-	}
+
 	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
 		struct mb_cache_entry *ce =
 			list_entry(mb_cache_lru_list.next,
@@ -223,12 +217,18 @@  mb_cache_shrink_fn(int nr_to_scan, gfp_t gfp_mask)
 		list_move_tail(&ce->e_lru_list, &free_list);
 		__mb_cache_entry_unhash(ce);
 	}
-	spin_unlock(&mb_cache_spinlock);
 	list_for_each_safe(l, ltmp, &free_list) {
 		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
 						   e_lru_list), gfp_mask);
 	}
 out:
+	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
+		mb_debug("cache %s (%d)", cache->c_name,
+			  atomic_read(&cache->c_entry_count));
+		count += atomic_read(&cache->c_entry_count);
+	}
+	spin_unlock(&mb_cache_spinlock);
+
 	return (count / 100) * sysctl_vfs_cache_pressure;
 }
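
For reference on the final statement: the shrinker reports the entry count
scaled by /proc/sys/vm/vfs_cache_pressure, the same convention the dcache and
inode-cache shrinkers of that era used. A quick worked example (the entry
count is made up):

	count = 2500, vfs_cache_pressure = 100 (default): (2500 / 100) * 100 = 2500
	count = 2500, vfs_cache_pressure =  50:           (2500 / 100) *  50 = 1250
	count = 2500, vfs_cache_pressure = 200:           (2500 / 100) * 200 = 5000

With the default pressure the report is roughly the raw count; lower values
make the cache look smaller to the VM (so it is reclaimed less aggressively),
while higher values make it look larger.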