powerpc: Track backing pages allocated by vmemmap_populate()

Submitter Mark Nelson
Date March 26, 2010, 7:12 a.m.
Message ID <201003261812.34095.markn@au1.ibm.com>
Permalink /patch/48615/
State Superseded

Comments

Mark Nelson - March 26, 2010, 7:12 a.m.
We need to keep track of the backing pages allocated by
vmemmap_populate() so that the dump-capture kernel can find these
pages in memory when kdump is used.

The backing pages are tracked with a linked list of structures, each
recording the physical address of a backing page and its corresponding
virtual address. A simple spinlock protects the vmemmap_list.
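
As an illustration, a consumer of the list (e.g. a debug dump of the
tracked pages) could walk it as in the sketch below. Note that
vmemmap_list_dump() is a hypothetical helper, not part of this patch;
it only assumes the vmemmap_backing structure, vmemmap_list and
vmemmap_list_lock introduced here.

	/* Hypothetical example, not part of this patch: print every
	 * backing page recorded in vmemmap_list, taking the lock so we
	 * do not race with vmemmap_list_populate().
	 */
	static void vmemmap_list_dump(void)
	{
		struct vmemmap_backing *vmem_back;

		spin_lock(&vmemmap_list_lock);
		list_for_each_entry(vmem_back, &vmemmap_list, list)
			pr_debug("vmemmap: virt %016lx backed by phys %016lx\n",
				 vmem_back->virt_addr, vmem_back->phys);
		spin_unlock(&vmemmap_list_lock);
	}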

Signed-off-by: Mark Nelson <markn@au1.ibm.com>
---
 arch/powerpc/include/asm/pgalloc-64.h |    7 +++++++
 arch/powerpc/mm/init_64.c             |   27 +++++++++++++++++++++++++++
 2 files changed, 34 insertions(+)

Patch

Index: upstream/arch/powerpc/include/asm/pgalloc-64.h
===================================================================
--- upstream.orig/arch/powerpc/include/asm/pgalloc-64.h
+++ upstream/arch/powerpc/include/asm/pgalloc-64.h
@@ -10,6 +10,13 @@ 
 #include <linux/slab.h>
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
+#include <linux/list.h>
+
+struct vmemmap_backing {
+	unsigned long phys;
+	unsigned long virt_addr;
+	struct list_head list;
+};
 
 /*
  * Functions that deal with pagetables that could be at any level of
Index: upstream/arch/powerpc/mm/init_64.c
===================================================================
--- upstream.orig/arch/powerpc/mm/init_64.c
+++ upstream/arch/powerpc/mm/init_64.c
@@ -42,6 +42,7 @@ 
 #include <linux/poison.h>
 #include <linux/lmb.h>
 #include <linux/hugetlb.h>
+#include <linux/list.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
@@ -251,6 +252,30 @@  static void __meminit vmemmap_create_map
 }
 #endif /* CONFIG_PPC_BOOK3E */
 
+LIST_HEAD(vmemmap_list);
+DEFINE_SPINLOCK(vmemmap_list_lock);
+
+static __meminit void vmemmap_list_populate(unsigned long phys,
+					    unsigned long start,
+					    int node)
+{
+	struct vmemmap_backing *vmem_back;
+
+	vmem_back = vmemmap_alloc_block(sizeof(struct vmemmap_backing), node);
+	if (unlikely(!vmem_back)) {
+		WARN_ON(1);
+		return;
+	}
+
+	vmem_back->phys = phys;
+	vmem_back->virt_addr = start;
+	INIT_LIST_HEAD(&vmem_back->list);
+
+	spin_lock(&vmemmap_list_lock);
+	list_add(&vmem_back->list, &vmemmap_list);
+	spin_unlock(&vmemmap_list_lock);
+}
+
 int __meminit vmemmap_populate(struct page *start_page,
 			       unsigned long nr_pages, int node)
 {
@@ -275,6 +300,8 @@  int __meminit vmemmap_populate(struct pa
 		if (!p)
 			return -ENOMEM;
 
+		vmemmap_list_populate(__pa(p), start, node);
+
 		pr_debug("      * %016lx..%016lx allocated at %p\n",
 			 start, start + page_size, p);
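
For the dump-capture kernel to actually locate this list, the symbol
and structure layout would also have to be exported, for example via
vmcoreinfo. A hedged sketch of what such a follow-up could add to
powerpc's arch_crash_save_vmcoreinfo() (not part of this patch; the
VMCOREINFO_* macros are the generic kexec ones):

	/* Hypothetical follow-up, not in this patch: describe the list
	 * to dump tools such as makedumpfile via vmcoreinfo.
	 */
	VMCOREINFO_SYMBOL(vmemmap_list);
	VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
	VMCOREINFO_OFFSET(vmemmap_backing, list);
	VMCOREINFO_OFFSET(vmemmap_backing, phys);
	VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);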