
slub: move synchronize_sched out of slab_mutex on shrink

Message ID 20161214142910.26061-1-colin.king@canonical.com
State New

Commit Message

Colin Ian King Dec. 14, 2016, 2:29 p.m. UTC
From: Vladimir Davydov <vdavydov.dev@gmail.com>

BugLink: http://bugs.launchpad.net/bugs/1649905

synchronize_sched() is a heavy operation and calling it for each cache
owned by a memory cgroup being destroyed may take quite some time.  Worse,
it is currently called under the slab_mutex, stalling all work items
doing cache creation/destruction.

Actually, there isn't much point in calling synchronize_sched() for each
cache - it's enough to call it just once - after setting cpu_partial for
all caches and before shrinking them.  This way, we can also move it out
of the slab_mutex, which we have to hold for iterating over the slab
cache list.

Link: https://bugzilla.kernel.org/show_bug.cgi?id=172991
Link: http://lkml.kernel.org/r/0a10d71ecae3db00fb4421bcd3f82bcc911f4be4.1475329751.git.vdavydov.dev@gmail.com
Signed-off-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Reported-by: Doug Smythies <dsmythies@telus.net>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit 89e364db71fb5e7fc8d93228152abfa67daf35fa)
Signed-off-by: Colin Ian King <colin.king@canonical.com>
---
 mm/slab.c        |  4 ++--
 mm/slab.h        |  2 +-
 mm/slab_common.c | 27 +++++++++++++++++++++++++--
 mm/slob.c        |  2 +-
 mm/slub.c        | 19 ++-----------------
 5 files changed, 31 insertions(+), 23 deletions(-)
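
For readers skimming the diff below, here is a minimal user-space C analogue of
the two-pass pattern the patch introduces in memcg_deactivate_kmem_caches().
It is only a sketch: the names fake_cache, expensive_sync, shrink_cache and
deactivate_all are invented for illustration, and expensive_sync() merely
stands in for the synchronize_sched() grace-period wait, while list_lock plays
the role of slab_mutex.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NCACHES 4

struct fake_cache {
	int cpu_partial;
	int min_partial;
};

static struct fake_cache caches[NCACHES];
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void expensive_sync(void)
{
	usleep(100 * 1000);	/* pretend this is a grace-period wait */
}

static void shrink_cache(struct fake_cache *c)
{
	(void)c;		/* pretend this releases empty slabs */
}

static void deactivate_all(void)
{
	int i;

	/* Pass 1: clear the flags on every cache while holding the lock. */
	pthread_mutex_lock(&list_lock);
	for (i = 0; i < NCACHES; i++) {
		caches[i].cpu_partial = 0;
		caches[i].min_partial = 0;
	}
	pthread_mutex_unlock(&list_lock);

	/* One synchronization for the whole batch, outside the lock. */
	expensive_sync();

	/* Pass 2: shrink every cache, again under the lock, with no
	 * further waiting inside the loop. */
	pthread_mutex_lock(&list_lock);
	for (i = 0; i < NCACHES; i++)
		shrink_cache(&caches[i]);
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	deactivate_all();
	printf("done\n");
	return 0;
}

The point of the restructuring is that the expensive wait happens exactly once,
after cpu_partial and min_partial have been cleared for all of the cgroup's
caches, and while the lock that other cache create/destroy work needs is not
held (build the sketch with cc -pthread).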

Comments

Tim Gardner Dec. 14, 2016, 2:33 p.m. UTC | #1
Good find!
Thadeu Lima de Souza Cascardo Dec. 15, 2016, 9 a.m. UTC | #2
On Wed, Dec 14, 2016 at 02:29:10PM +0000, Colin King wrote:
> From: Vladimir Davydov <vdavydov.dev@gmail.com>
> 
> BugLink: http://bugs.launchpad.net/bugs/1649905
> 
> synchronize_sched() is a heavy operation and calling it for each cache
> owned by a memory cgroup being destroyed may take quite some time.  Worse,
> it is currently called under the slab_mutex, stalling all work items
> doing cache creation/destruction.
> 
> Actually, there isn't much point in calling synchronize_sched() for each
> cache - it's enough to call it just once - after setting cpu_partial for
> all caches and before shrinking them.  This way, we can also move it out
> of the slab_mutex, which we have to hold for iterating over the slab
> cache list.
> 

[...]

What series is this targeted to?

Thanks.
Cascardo.
Colin Ian King Dec. 16, 2016, 5:35 p.m. UTC | #3
On 15/12/16 09:00, Thadeu Lima de Souza Cascardo wrote:
> On Wed, Dec 14, 2016 at 02:29:10PM +0000, Colin King wrote:
>> From: Vladimir Davydov <vdavydov.dev@gmail.com>
>>
>> BugLink: http://bugs.launchpad.net/bugs/1649905
>>
>> synchronize_sched() is a heavy operation and calling it for each cache
>> owned by a memory cgroup being destroyed may take quite some time.  Worse,
>> it is currently called under the slab_mutex, stalling all work items
>> doing cache creation/destruction.
>>
>> Actually, there isn't much point in calling synchronize_sched() for each
>> cache - it's enough to call it just once - after setting cpu_partial for
>> all caches and before shrinking them.  This way, we can also move it out
>> of the slab_mutex, which we have to hold for iterating over the slab
>> cache list.
>>
> 
> [...]
> 
> What series is this targeted to?

Yakkety SRU and Zesty

Colin
> 
> Thanks.
> Cascardo.
>
Thadeu Lima de Souza Cascardo Dec. 16, 2016, 5:57 p.m. UTC | #4
ACK.
Luis Henriques Dec. 19, 2016, 9:59 a.m. UTC | #5
Applied to yakkety master-next branch.

Cheers,
--
Luís

Patch

diff --git a/mm/slab.c b/mm/slab.c
index 0b0550c..7ea765c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2332,7 +2332,7 @@  static int drain_freelist(struct kmem_cache *cache,
 	return nr_freed;
 }
 
-int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0;
 	int node;
@@ -2352,7 +2352,7 @@  int __kmem_cache_shrink(struct kmem_cache *cachep, bool deactivate)
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	return __kmem_cache_shrink(cachep, false);
+	return __kmem_cache_shrink(cachep);
 }
 
 void __kmem_cache_release(struct kmem_cache *cachep)
diff --git a/mm/slab.h b/mm/slab.h
index bc05fdc..ceb7d70 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -146,7 +146,7 @@  static inline unsigned long kmem_cache_flags(unsigned long object_size,
 
 int __kmem_cache_shutdown(struct kmem_cache *);
 void __kmem_cache_release(struct kmem_cache *);
-int __kmem_cache_shrink(struct kmem_cache *, bool);
+int __kmem_cache_shrink(struct kmem_cache *);
 void slab_kmem_cache_release(struct kmem_cache *);
 
 struct seq_file;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 329b038..5d2f24f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -573,6 +573,29 @@  void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
 	get_online_cpus();
 	get_online_mems();
 
+#ifdef CONFIG_SLUB
+	/*
+	 * In case of SLUB, we need to disable empty slab caching to
+	 * avoid pinning the offline memory cgroup by freeable kmem
+	 * pages charged to it. SLAB doesn't need this, as it
+	 * periodically purges unused slabs.
+	 */
+	mutex_lock(&slab_mutex);
+	list_for_each_entry(s, &slab_caches, list) {
+		c = is_root_cache(s) ? cache_from_memcg_idx(s, idx) : NULL;
+		if (c) {
+			c->cpu_partial = 0;
+			c->min_partial = 0;
+		}
+	}
+	mutex_unlock(&slab_mutex);
+	/*
+	 * kmem_cache->cpu_partial is checked locklessly (see
+	 * put_cpu_partial()). Make sure the change is visible.
+	 */
+	synchronize_sched();
+#endif
+
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list) {
 		if (!is_root_cache(s))
@@ -584,7 +607,7 @@  void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
 		if (!c)
 			continue;
 
-		__kmem_cache_shrink(c, true);
+		__kmem_cache_shrink(c);
 		arr->entries[idx] = NULL;
 	}
 	mutex_unlock(&slab_mutex);
@@ -755,7 +778,7 @@  int kmem_cache_shrink(struct kmem_cache *cachep)
 	get_online_cpus();
 	get_online_mems();
 	kasan_cache_shrink(cachep);
-	ret = __kmem_cache_shrink(cachep, false);
+	ret = __kmem_cache_shrink(cachep);
 	put_online_mems();
 	put_online_cpus();
 	return ret;
diff --git a/mm/slob.c b/mm/slob.c
index 5ec1580..eac04d4 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -634,7 +634,7 @@  void __kmem_cache_release(struct kmem_cache *c)
 {
 }
 
-int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
 }
diff --git a/mm/slub.c b/mm/slub.c
index 2b3e740..4a861f2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3883,7 +3883,7 @@  EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
 	int node;
 	int i;
@@ -3895,21 +3895,6 @@  int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
 	unsigned long flags;
 	int ret = 0;
 
-	if (deactivate) {
-		/*
-		 * Disable empty slabs caching. Used to avoid pinning offline
-		 * memory cgroups by kmem pages that can be freed.
-		 */
-		s->cpu_partial = 0;
-		s->min_partial = 0;
-
-		/*
-		 * s->cpu_partial is checked locklessly (see put_cpu_partial),
-		 * so we have to make sure the change is visible.
-		 */
-		synchronize_sched();
-	}
-
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n) {
 		INIT_LIST_HEAD(&discard);
@@ -3966,7 +3951,7 @@  static int slab_mem_going_offline_callback(void *arg)
 
 	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list)
-		__kmem_cache_shrink(s, false);
+		__kmem_cache_shrink(s);
 	mutex_unlock(&slab_mutex);
 
 	return 0;