diff mbox

[Powerpc/SLQB] Next June 06 : BUG during scsi initialization

Message ID Pine.LNX.4.64.0906071105030.11309@melkki.cs.Helsinki.FI (mailing list archive)
State Not Applicable, archived
Headers show

Commit Message

Pekka Enberg June 7, 2009, 8:06 a.m. UTC
Hi Sachin,

On Fri, 5 Jun 2009, Sachin Sant wrote:
> I can still recreate this bug on a Power 6 hardware with today's next tree.
> I can recreate this problem at will.
> Let me know if i can help in debugging this problem.

Can you please reproduce the issue with this debugging patch applied and 
post the result?

			Pekka

From 27189e1e1d2890e98cb029bd1121c86b8c53ecd9 Mon Sep 17 00:00:00 2001
From: Pekka Enberg <penberg@cs.helsinki.fi>
Date: Sun, 7 Jun 2009 11:03:50 +0300
Subject: [PATCH] slqb: debugging

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---
 mm/slqb.c |   13 ++++++++++++-
 1 files changed, 12 insertions(+), 1 deletions(-)

Comments

Sachin P. Sant June 8, 2009, 12:12 p.m. UTC | #1
Pekka J Enberg wrote:
> Hi Sachin,
>
> On Fri, 5 Jun 2009, Sachin Sant wrote:
>   
>> I can still recreate this bug on a Power 6 hardware with today's next tree.
>> I can recreate this problem at will.
>> Let me know if i can help in debugging this problem.
>>     
>
> Can you please reproduce the issue with this debugging patch applied and 
> post the result?
I had some trouble collecting debug output with this patch. Because of the
number of printk's the machine was crawling during boot. It was difficult
to read/collect the output. So I inserted a hack as follows such that
__slab_alloc_page prints only during the failure (modprobe scsi-driver):

+ if (!strcmp(current->comm, "modprobe"))
+	printk(KERN_INFO "%s: cpu=%d, cache_cpu=%p, cache_list=%p\n", __func__, cpu, c, l);

Attached here is the boot log. Let me know if I can provide any other
information.

Thanks
-Sachin
diff mbox

Patch

diff --git a/mm/slqb.c b/mm/slqb.c
index 29bb005..dce39d4 100644
--- a/mm/slqb.c
+++ b/mm/slqb.c
@@ -1382,6 +1382,8 @@  static noinline void *__slab_alloc_page(struct kmem_cache *s,
 		l = &c->list;
 		page->list = l;
 
+		printk(KERN_INFO "%s: cpu=%d, cache_cpu=%p, cache_list=%p\n", __func__, cpu, c, l);
+
 		spin_lock(&l->page_lock);
 		l->nr_slabs++;
 		l->nr_partial++;
@@ -1393,11 +1395,15 @@  static noinline void *__slab_alloc_page(struct kmem_cache *s,
 	} else {
 #ifdef CONFIG_NUMA
 		struct kmem_cache_node *n;
+		int nid;
 
-		n = s->node_slab[slqb_page_to_nid(page)];
+		nid = slqb_page_to_nid(page);
+		n = s->node_slab[nid];
 		l = &n->list;
 		page->list = l;
 
+		printk(KERN_INFO "%s: nid=%d, cache_node=%p, cache_list=%p\n", __func__, nid, n, l);
+
 		spin_lock(&n->list_lock);
 		spin_lock(&l->page_lock);
 		l->nr_slabs++;
@@ -2028,6 +2034,8 @@  static void free_kmem_cache_nodes(struct kmem_cache *s)
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n;
 
+		printk(KERN_INFO "%s: cache=%s, node=%d\n", __func__, s->name, node);
+
 		n = s->node_slab[node];
 		if (n) {
 			kmem_cache_free(&kmem_node_cache, n);
@@ -2043,8 +2051,11 @@  static int alloc_kmem_cache_nodes(struct kmem_cache *s)
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n;
 
+		printk(KERN_INFO "%s: cache=%s, node=%d\n", __func__, s->name, node);
+
 		n = kmem_cache_alloc_node(&kmem_node_cache, GFP_KERNEL, node);
 		if (!n) {
+			printk(KERN_INFO "%s: %s: kmem_cache_alloc_node() failed for node %d\n", __func__, s->name, node);
 			free_kmem_cache_nodes(s);
 			return 0;
 		}