
[3/7] slub: reduce indentation level in kmem_cache_alloc_bulk()

Message ID 20150615155216.18824.26550.stgit@devil
State Not Applicable, archived
Delegated to: David Miller

Commit Message

Jesper Dangaard Brouer June 15, 2015, 3:52 p.m. UTC
Use the kernel's early-return style to reduce the indentation level:
test for kmem_cache_debug() first and fall back to the non-optimized
bulking in __kmem_cache_alloc_bulk().

This also makes it easier to fix a bug in the current implementation
in the next patch.
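
For reference, a minimal stand-alone sketch of the transformation
(hypothetical names, plain userspace C, not the slub code itself):
the rare case is tested first and returns early, so the common path
drops one level of indentation.

	#include <stdbool.h>
	#include <stddef.h>

	/* Stand-in for __kmem_cache_alloc_bulk(): the slow, generic path. */
	static size_t generic_fill(void **p, size_t size)
	{
		return size;
	}

	/* Before: the fast path is nested inside the !debug test. */
	static size_t fill_nested(bool debug, void **p, size_t size)
	{
		if (!debug) {
			size_t i;

			for (i = 0; i < size; i++)
				p[i] = NULL;	/* fast-path work */
		}

		return generic_fill(p, size);
	}

	/* After: return early on the rare debug case; the fast path
	 * now sits at the top indentation level.
	 */
	static size_t fill_early_return(bool debug, void **p, size_t size)
	{
		size_t i;

		if (debug)
			return generic_fill(p, size);

		for (i = 0; i < size; i++)
			p[i] = NULL;	/* fast-path work */

		return generic_fill(p, size);
	}

	int main(void)
	{
		void *objs[4];

		fill_nested(false, objs, 4);
		fill_early_return(true, objs, 4);
		return 0;
	}

The hunk below applies exactly this shape to kmem_cache_alloc_bulk().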

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
---
 mm/slub.c |   37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)



Patch

diff --git a/mm/slub.c b/mm/slub.c
index d18f8e195ac4..753f88bd8b40 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2757,32 +2757,33 @@  void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 EXPORT_SYMBOL(kmem_cache_free_bulk);
 
 bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
-								void **p)
+			   void **p)
 {
-	if (!kmem_cache_debug(s)) {
-		struct kmem_cache_cpu *c;
+	struct kmem_cache_cpu *c;
 
-		/* Drain objects in the per cpu slab */
-		local_irq_disable();
-		c = this_cpu_ptr(s->cpu_slab);
+	/* Debugging fallback to generic bulk */
+	if (kmem_cache_debug(s))
+		return __kmem_cache_alloc_bulk(s, flags, size, p);
 
-		while (size) {
-			void *object = c->freelist;
+	/* Drain objects in the per cpu slab */
+	local_irq_disable();
+	c = this_cpu_ptr(s->cpu_slab);
 
-			if (!object)
-				break;
+	while (size) {
+		void *object = c->freelist;
 
-			c->freelist = get_freepointer(s, object);
-			*p++ = object;
-			size--;
+		if (!object)
+			break;
 
-			if (unlikely(flags & __GFP_ZERO))
-				memset(object, 0, s->object_size);
-		}
-		c->tid = next_tid(c->tid);
+		c->freelist = get_freepointer(s, object);
+		*p++ = object;
+		size--;
 
-		local_irq_enable();
+		if (unlikely(flags & __GFP_ZERO))
+			memset(object, 0, s->object_size);
 	}
+	c->tid = next_tid(c->tid);
+	local_irq_enable();
 
 	return __kmem_cache_alloc_bulk(s, flags, size, p);
 }