[-V2,4/4] powerpc/kvm: Use 256K chunk to track both RMA and hash page table allocation.

Message ID 1372410662-3748-4-git-send-email-aneesh.kumar@linux.vnet.ibm.com (mailing list archive)
State Superseded

Commit Message

Aneesh Kumar K.V June 28, 2013, 9:11 a.m. UTC
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>

Both RMA and hash page table requests will be a multiple of 256K. We can
therefore use a chunk size of 256K and let each bit in the bitmap track one
free/used 256K chunk instead of one page. This should help reduce the bitmap
size.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/kvm/book3s_hv_cma.c | 35 +++++++++++++++++++++++++----------
 1 file changed, 25 insertions(+), 10 deletions(-)
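
For a sense of the savings, here is a minimal stand-alone sketch (not part of
the patch) that works out the bitmap size before and after the change.  It
assumes 4K pages (PAGE_SHIFT = 12) and a hypothetical 16GB reserved region;
the actual numbers depend on the configured page size and CMA region size.

#include <stdio.h>

int main(void)
{
	unsigned long page_shift  = 12;		/* assumed 4K pages */
	unsigned long chunk_order = 18;		/* 256K chunks, as in the patch */
	unsigned long region      = 16UL << 30;	/* hypothetical 16GB CMA region */

	unsigned long page_bits  = region >> page_shift;	/* one bit per page  */
	unsigned long chunk_bits = region >> chunk_order;	/* one bit per chunk */

	printf("per-page bitmap:  %lu bits (%lu KB)\n",
	       page_bits, page_bits / 8 / 1024);
	printf("per-chunk bitmap: %lu bits (%lu KB)\n",
	       chunk_bits, chunk_bits / 8 / 1024);
	return 0;
}

With those example numbers the bitmap shrinks from 512KB to 8KB, i.e. by a
factor of 2^(KVM_CMA_CHUNK_ORDER - PAGE_SHIFT) = 64.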

Comments

Paul Mackerras June 29, 2013, 7:43 a.m. UTC | #1
On Fri, Jun 28, 2013 at 02:41:02PM +0530, Aneesh Kumar K.V wrote:
> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
> 
> Both RMA and hash page table requests will be a multiple of 256K. We can
> therefore use a chunk size of 256K and let each bit in the bitmap track one
> free/used 256K chunk instead of one page. This should help reduce the bitmap
> size.
> 
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>

Looks good overall, just some minor comments below:

> +	int chunk_count, nr_chunk;

I get a little nervous when I see "int" used for variables storing a
number of pages or related things such as chunks.  Yes, int is enough
today, but one day it won't be, and there is no time or space penalty
for using "long" instead; in fact the code generated for "long"
variables can be slightly shorter.  So please make variables like this
"long".  (That will require changes to earlier patches in this
series.)
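
For illustration, the change being asked for would look roughly like this in
kvm_alloc_cma() (a sketch only, not the actual respin, which would also need
to touch the earlier patches in the series):

 	int ret;
-	int chunk_count, nr_chunk;
+	long chunk_count, nr_chunk;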

> +	 * aling mask with chunk size. The bit tracks pages in chunk size

Should be "align".

Paul.

Patch

diff --git a/arch/powerpc/kvm/book3s_hv_cma.c b/arch/powerpc/kvm/book3s_hv_cma.c
index fdd0b88..018613a 100644
--- a/arch/powerpc/kvm/book3s_hv_cma.c
+++ b/arch/powerpc/kvm/book3s_hv_cma.c
@@ -23,6 +23,10 @@ 
 #include <linux/mutex.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
+/*
+ * Both RMA and Hash page allocation will be multiple of 256K.
+ */
+#define KVM_CMA_CHUNK_ORDER	18
 
 struct kvm_cma {
 	unsigned long	base_pfn;
@@ -94,6 +98,7 @@  err:
 struct page *kvm_alloc_cma(int nr_pages, unsigned long align_pages)
 {
 	int ret;
+	int chunk_count, nr_chunk;
 	struct page *page = NULL;
 	struct kvm_cma *cma = &kvm_cma_area;
 	unsigned long mask, pfn, pageno, start = 0;
@@ -107,20 +112,26 @@  struct page *kvm_alloc_cma(int nr_pages, unsigned long align_pages)
 
 	if (!nr_pages)
 		return NULL;
+	/*
+	 * aling mask with chunk size. The bit tracks pages in chunk size
+	 */
+	mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
+	BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);
 
-	mask = align_pages - 1;
+	chunk_count = cma->count >>  (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
 	mutex_lock(&kvm_cma_mutex);
 	for (;;) {
-		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
-						    start, nr_pages, mask);
-		if (pageno >= cma->count)
+		pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
+						    start, nr_chunk, mask);
+		if (pageno >= chunk_count)
 			break;
 
-		pfn = cma->base_pfn + pageno;
+		pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
 		ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
 		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, nr_pages);
+			bitmap_set(cma->bitmap, pageno, nr_chunk);
 			page = pfn_to_page(pfn);
 			memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
 			break;
@@ -148,6 +159,7 @@  struct page *kvm_alloc_cma(int nr_pages, unsigned long align_pages)
  */
 bool kvm_release_cma(struct page *pages, int nr_pages)
 {
+	int nr_chunk;
 	unsigned long pfn;
 	struct kvm_cma *cma = &kvm_cma_area;
 
@@ -163,9 +175,12 @@  bool kvm_release_cma(struct page *pages, int nr_pages)
 		return false;
 
 	VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
+	nr_chunk = nr_pages >>  (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
 
 	mutex_lock(&kvm_cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, nr_pages);
+	bitmap_clear(cma->bitmap,
+		     (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
+		     nr_chunk);
 	free_contig_range(pfn, nr_pages);
 	mutex_unlock(&kvm_cma_mutex);
 
@@ -196,14 +211,14 @@  static int __init kvm_cma_activate_area(unsigned long base_pfn,
 
 static int __init kvm_cma_init_reserved_areas(void)
 {
-	int bitmap_size, ret;
+	int bitmap_size, ret, chunk_count;
 	struct kvm_cma *cma = &kvm_cma_area;
 
 	pr_debug("%s()\n", __func__);
 	if (!cma->count)
 		return 0;
-
-	bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+	chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+	bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
 	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
 	if (!cma->bitmap)
 		return -ENOMEM;
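
As a quick sanity check of the shift arithmetic above, here is a small
user-space sketch (illustrative only; it assumes 4K pages and a made-up
request size) that mirrors the page-to-chunk conversions used in
kvm_alloc_cma() and kvm_release_cma():

#include <stdio.h>

#define PAGE_SHIFT		12	/* assumed 4K pages for this sketch */
#define KVM_CMA_CHUNK_ORDER	18	/* 256K chunks, as in the patch */
#define CHUNK_SHIFT		(KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)

int main(void)
{
	unsigned long nr_pages    = 4096;	/* hypothetical 16MB HPT request */
	unsigned long align_pages = 4096;	/* HPT is aligned to its size    */

	unsigned long nr_chunk = nr_pages >> CHUNK_SHIFT;
	unsigned long mask     = (align_pages >> CHUNK_SHIFT) - 1;

	printf("nr_pages=%lu -> nr_chunk=%lu, align mask=0x%lx\n",
	       nr_pages, nr_chunk, mask);

	/* a chunk index found in the bitmap converts back to a pfn offset */
	unsigned long chunkno = 3;	/* example result of the bitmap search */
	printf("chunk %lu starts at pfn offset %lu\n",
	       chunkno, chunkno << CHUNK_SHIFT);
	return 0;
}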