
[RFC,4/6] KVM: ARM: extend __coherent_cache_guest_page

Message ID: 1425667953-3566-5-git-send-email-drjones@redhat.com
State: New

Commit Message

Andrew Jones March 6, 2015, 6:52 p.m. UTC
Also support invalidate-only cache maintenance, rather than always doing a clean+invalidate.

Signed-off-by: Andrew Jones <drjones@redhat.com>
---
 arch/arm/include/asm/kvm_mmu.h   | 7 +++++--
 arch/arm/kvm/mmu.c               | 2 +-
 arch/arm64/include/asm/kvm_mmu.h | 7 +++++--
 3 files changed, 11 insertions(+), 5 deletions(-)
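
For context, here is a minimal sketch (not part of this patch) of how a caller could use the extended interface: invalidate-only maintenance for a mapping the guest accesses with its caches off, so stale clean lines are discarded rather than written back over the guest's data, and the existing clean+invalidate behaviour otherwise. The function and macro names come from the patch itself; passing 'uncached' through as the new invalidate argument is only an assumption about a possible follow-up, since the one caller this patch updates still passes false.

static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
				      unsigned long size, bool uncached)
{
	/*
	 * Assumed policy, for illustration only: skip the clean to PoC
	 * when the mapping is uncached and just invalidate instead, so
	 * no stale cached lines get written back to memory.
	 */
	bool need_flush = !uncached && !vcpu_has_cache_enabled(vcpu);

	__coherent_cache_guest_page(pfn, size, need_flush, uncached);
}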

Patch

diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index fd801e96fdd3c..a1c7f554f5de8 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -176,7 +176,8 @@  static inline void *kvm_get_hwpgd(struct kvm *kvm)
 
 struct kvm;
 
-#define kvm_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
+#define kvm_flush_dcache_to_poc(a,l)		__cpuc_flush_dcache_area((a), (l))
+#define kvm_invalidate_cache_to_poc(a,l)	dmac_unmap_area((a), (l), 0)
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
@@ -184,7 +185,7 @@  static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 }
 
 static inline void __coherent_cache_guest_page(pfn_t pfn, unsigned long size,
-					       bool need_flush)
+					       bool need_flush, bool invalidate)
 {
 	/*
 	 * If we are going to insert an instruction page and the icache is
@@ -214,6 +215,8 @@  static inline void __coherent_cache_guest_page(pfn_t pfn, unsigned long size,
 
 		if (need_flush)
 			kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+		if (invalidate)
+			kvm_invalidate_cache_to_poc(va, PAGE_SIZE);
 
 		if (icache_is_pipt())
 			__cpuc_coherent_user_range((unsigned long)va,
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 781afc712871c..2f3a6581b9200 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1158,7 +1158,7 @@  static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
 				      unsigned long size, bool uncached)
 {
 	bool need_flush = uncached || !vcpu_has_cache_enabled(vcpu);
-	__coherent_cache_guest_page(pfn, size, need_flush);
+	__coherent_cache_guest_page(pfn, size, need_flush, false);
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 56a976c776bc2..e1090ad70133d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -257,7 +257,8 @@  static inline bool kvm_page_empty(void *ptr)
 
 struct kvm;
 
-#define kvm_flush_dcache_to_poc(a,l)	__flush_dcache_area((a), (l))
+#define kvm_flush_dcache_to_poc(a,l)		__flush_dcache_area((a), (l))
+#define kvm_invalidate_cache_to_poc(a,l)	__dma_unmap_area((a), (l), 0)
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
@@ -265,12 +266,14 @@  static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 }
 
 static inline void __coherent_cache_guest_page(pfn_t pfn, unsigned long size,
-					       bool need_flush)
+					       bool need_flush, bool invalidate)
 {
 	void *va = page_address(pfn_to_page(pfn));
 
 	if (need_flush)
 		kvm_flush_dcache_to_poc(va, size);
+	if (invalidate)
+		kvm_invalidate_cache_to_poc(va, size);
 
 	if (!icache_is_aliasing()) {		/* PIPT */
 		flush_icache_range((unsigned long)va,