
powerpc: VRM unused page hinting

Message ID 20090414013741.GA8678@austin.ibm.com (mailing list archive)
State Superseded, archived

Commit Message

Robert Jennings April 14, 2009, 1:37 a.m. UTC
Adds support for the "unused" page hint, which can be used in shared
memory partitions to flag pages that are not in use.  The hypervisor
will steal these flagged pages before active pages when memory needs
to be moved to LPARs in need of additional memory.  Failure to mark
pages as 'unused' means the LPAR is slower to give up unused memory
to other partitions.

Signed-off-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Robert Jennings <rcj@linux.vnet.ibm.com>

---

 arch/powerpc/include/asm/page.h       |    5 +++
 arch/powerpc/platforms/pseries/lpar.c |   52 ++++++++++++++++++++++++++++++++++
 2 files changed, 57 insertions(+)
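
For context (not part of the patch): defining HAVE_ARCH_FREE_PAGE is what
makes the generic allocator's free path call the new arch_free_page() hook;
without it, core code falls back to an empty inline stub.  Roughly,
simplified from include/linux/gfp.h and mm/page_alloc.c of this era:

/* include/linux/gfp.h: empty stub unless the architecture defines
 * HAVE_ARCH_FREE_PAGE, as asm/page.h now does under CONFIG_PPC_SMLPAR. */
#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif

/* mm/page_alloc.c: the free path hands pages to the architecture hook
 * before returning them to the buddy free lists. */
static void __free_pages_ok(struct page *page, unsigned int order)
{
	/* ... per-page sanity checks elided ... */
	arch_free_page(page, order);
	/* ... pages then go back onto the buddy free lists ... */
}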

Comments

Paul Mackerras April 15, 2009, 4:46 a.m. UTC | #1
Robert Jennings writes:

> Adds support for the "unused" page hint, which can be used in shared
> memory partitions to flag pages that are not in use.  The hypervisor
> will steal these flagged pages before active pages when memory needs
> to be moved to LPARs in need of additional memory.  Failure to mark
> pages as 'unused' means the LPAR is slower to give up unused memory
> to other partitions.

The patch also adds a kernel command line option.  That should be
mentioned in the patch description and documented in
Documentation/kernel-parameters.txt.  Also, I'd like you to choose a
better name for the kernel command line option than "cmma", which is
completely opaque to me -- it doesn't seem to be the name or even the
initials of anything in the patch.  Presumably it's somehow related to
CMO, but just how is not at all clear.

Paul.
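
For reference, a Documentation/kernel-parameters.txt entry for the option
as currently posted might look something like the following (illustrative
only, and using the "cmma=" name that Paul is asking to have changed):

	cmma=		[PPC] Enable or disable unused page hinting for
			shared memory (CMO) partitions on pSeries.
			Format: { yes | no | on | off }
			Default: on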

Patch

Index: b/arch/powerpc/include/asm/page.h
===================================================================
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -231,6 +231,11 @@  extern void copy_user_page(void *to, voi
 		struct page *p);
 extern int page_is_ram(unsigned long pfn);
 
+#ifdef CONFIG_PPC_SMLPAR
+void arch_free_page(struct page *page, int order);
+#define HAVE_ARCH_FREE_PAGE
+#endif
+
 struct vm_area_struct;
 
 typedef struct page *pgtable_t;
Index: b/arch/powerpc/platforms/pseries/lpar.c
===================================================================
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -609,3 +609,55 @@  void __init hpte_init_lpar(void)
 	ppc_md.flush_hash_range	= pSeries_lpar_flush_hash_range;
 	ppc_md.hpte_clear_all   = pSeries_lpar_hptab_clear;
 }
+
+#ifdef CONFIG_PPC_SMLPAR
+#define CMMA_DEFAULT 1
+static int cmma_flag = CMMA_DEFAULT;
+
+static int __init cmma(char *str)
+{
+	char *parm;
+	parm = strstrip(str);
+
+	if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) {
+		printk(KERN_INFO "cmma: page marking is not active.\n");
+		cmma_flag = 0;
+		return 1;
+	}
+
+	cmma_flag = 1;
+	printk(KERN_INFO "cmma: page marking is active.\n");
+
+	if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0)
+		return 1;
+
+	return 0;
+}
+
+__setup("cmma=", cmma);
+
+static void pSeries_set_page_state(struct page *page, int order,
+				   unsigned long state)
+{
+	int i, j;
+	unsigned long cmo_page_sz, addr;
+
+	cmo_page_sz = cmo_get_page_size();
+	addr = __pa((unsigned long)page_address(page));
+
+	for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
+		for (j = 0; j < PAGE_SIZE; j += cmo_page_sz)
+			plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0);
+	}
+}
+
+void arch_free_page(struct page *page, int order)
+{
+	if (!cmma_flag || !firmware_has_feature(FW_FEATURE_CMO))
+		return;
+
+	pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED);
+}
+EXPORT_SYMBOL(arch_free_page);
+
+#endif
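
A rough sketch of the cost of the hinting loop above (illustration only,
not part of the patch): pSeries_set_page_state() issues one H_PAGE_INIT
h-call per CMO-sized chunk of the freed range, so the count per
arch_free_page() call is (1 << order) * (PAGE_SIZE / cmo_page_sz).
Assuming, for example, a 64K kernel PAGE_SIZE and a 4K CMO page size
reported by cmo_get_page_size(), an order-0 free issues 16 h-calls and an
order-2 free issues 64.

/* Illustration only: number of H_PAGE_INIT h-calls issued for a single
 * arch_free_page() call, given the nested loops above.  PAGE_SIZE comes
 * from asm/page.h; cmo_page_sz is the value cmo_get_page_size() returns. */
static inline unsigned long page_hint_hcalls(int order, unsigned long cmo_page_sz)
{
	return (1UL << order) * (PAGE_SIZE / cmo_page_sz);
}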