@@ -1382,6 +1382,8 @@ static noinline void *__slab_alloc_page(struct kmem_cache *s,
l = &c->list;
page->list = l;
+ printk(KERN_INFO "%s: cpu=%d, cache_cpu=%p, cache_list=%p\n", __func__, cpu, c, l);
+
spin_lock(&l->page_lock);
l->nr_slabs++;
l->nr_partial++;
@@ -1393,11 +1395,15 @@ static noinline void *__slab_alloc_page(struct kmem_cache *s,
} else {
#ifdef CONFIG_NUMA
struct kmem_cache_node *n;
+ int nid;
- n = s->node_slab[slqb_page_to_nid(page)];
+ nid = slqb_page_to_nid(page);
+ n = s->node_slab[nid];
l = &n->list;
page->list = l;
+ printk(KERN_INFO "%s: nid=%d, cache_node=%p, cache_list=%p\n", __func__, nid, n, l);
+
spin_lock(&n->list_lock);
spin_lock(&l->page_lock);
l->nr_slabs++;
@@ -2028,6 +2034,8 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n;
+ printk(KERN_INFO "%s: cache=%s, node=%d\n", __func__, s->name, node);
+
n = s->node_slab[node];
if (n) {
kmem_cache_free(&kmem_node_cache, n);
@@ -2043,8 +2051,11 @@ static int alloc_kmem_cache_nodes(struct kmem_cache *s)
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n;
+ printk(KERN_INFO "%s: cache=%s, node=%d\n", __func__, s->name, node);
+
n = kmem_cache_alloc_node(&kmem_node_cache, GFP_KERNEL, node);
if (!n) {
+ printk(KERN_WARNING "%s: cache=%s, kmem_cache_alloc_node() failed for node %d\n", __func__, s->name, node);
free_kmem_cache_nodes(s);
return 0;
}