diff mbox series

[v1,2/4] KVM: PPC: Book3S HV: track shared GFNs of secure VMs

Message ID 1590892071-25549-3-git-send-email-linuxram@us.ibm.com (mailing list archive)
State Not Applicable
Headers show
Series Migrate non-migrated pages of a SVM. | expand

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success Successfully applied on branch powerpc/merge (00ec79b0b767994422c43792d73ff1327714a73f)
snowpatch_ozlabs/checkpatch warning total: 0 errors, 0 warnings, 2 checks, 217 lines checked
snowpatch_ozlabs/needsstable success Patch has no Fixes tags

Commit Message

Ram Pai May 31, 2020, 2:27 a.m. UTC
During the life of an SVM, its GFNs can transition from secure to shared
state and vice versa. Since the kernel does not track which GFNs are
shared, it is not possible to disambiguate a shared GFN from a GFN whose
PFN has not yet been migrated to a device-PFN.

The ability to identify a shared GFN is needed to skip migrating its PFN
to device PFN. This functionality is leveraged in a subsequent patch.

Add the ability to identify the state of a GFN.

Cc: Paul Mackerras <paulus@ozlabs.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Bharata B Rao <bharata@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Claudio Carvalho <cclaudio@linux.ibm.com>
Cc: kvm-ppc@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Reviewed-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Signed-off-by: Ram Pai <linuxram@us.ibm.com>
---
 arch/powerpc/include/asm/kvm_book3s_uvmem.h |   6 +-
 arch/powerpc/kvm/book3s_64_mmu_radix.c      |   2 +-
 arch/powerpc/kvm/book3s_hv.c                |   2 +-
 arch/powerpc/kvm/book3s_hv_uvmem.c          | 115 ++++++++++++++++++++++++++--
 4 files changed, 113 insertions(+), 12 deletions(-)

Comments

kernel test robot June 1, 2020, 4:12 a.m. UTC | #1
Hi Ram,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on powerpc/next]
[also build test WARNING on v5.7 next-20200529]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]

url:    https://github.com/0day-ci/linux/commits/Ram-Pai/Migrate-non-migrated-pages-of-a-SVM/20200601-034649
base:   https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=powerpc 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kbuild test robot <lkp@intel.com>

All warnings (new ones prefixed by >>, old ones prefixed by <<):

arch/powerpc/kvm/book3s_hv_uvmem.c:158:6: warning: no previous prototype for 'kvmppc_uvmem_available' [-Wmissing-prototypes]
158 | bool kvmppc_uvmem_available(void)
|      ^~~~~~~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:167:5: warning: no previous prototype for 'kvmppc_uvmem_slot_init' [-Wmissing-prototypes]
167 | int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
|     ^~~~~~~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:192:6: warning: no previous prototype for 'kvmppc_uvmem_slot_free' [-Wmissing-prototypes]
192 | void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
|      ^~~~~~~~~~~~~~~~~~~~~~
>> arch/powerpc/kvm/book3s_hv_uvmem.c:279:6: warning: no previous prototype for 'kvmppc_gfn_is_uvmem_shared' [-Wmissing-prototypes]
279 | bool kvmppc_gfn_is_uvmem_shared(unsigned long gfn, struct kvm *kvm)
|      ^~~~~~~~~~~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:293:15: warning: no previous prototype for 'kvmppc_h_svm_init_start' [-Wmissing-prototypes]
293 | unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
|               ^~~~~~~~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:335:15: warning: no previous prototype for 'kvmppc_h_svm_init_done' [-Wmissing-prototypes]
335 | unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
|               ^~~~~~~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:356:6: warning: no previous prototype for 'kvmppc_uvmem_drop_pages' [-Wmissing-prototypes]
356 | void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
|      ^~~~~~~~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:397:15: warning: no previous prototype for 'kvmppc_h_svm_init_abort' [-Wmissing-prototypes]
397 | unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
|               ^~~~~~~~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:599:15: warning: no previous prototype for 'kvmppc_h_svm_page_in' [-Wmissing-prototypes]
599 | unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
|               ^~~~~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:784:1: warning: no previous prototype for 'kvmppc_h_svm_page_out' [-Wmissing-prototypes]
784 | kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
| ^~~~~~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:822:5: warning: no previous prototype for 'kvmppc_send_page_to_uv' [-Wmissing-prototypes]
822 | int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
|     ^~~~~~~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:867:5: warning: no previous prototype for 'kvmppc_uvmem_init' [-Wmissing-prototypes]
867 | int kvmppc_uvmem_init(void)
|     ^~~~~~~~~~~~~~~~~
arch/powerpc/kvm/book3s_hv_uvmem.c:922:6: warning: no previous prototype for 'kvmppc_uvmem_free' [-Wmissing-prototypes]
922 | void kvmppc_uvmem_free(void)
|      ^~~~~~~~~~~~~~~~~

vim +/kvmppc_gfn_is_uvmem_shared +279 arch/powerpc/kvm/book3s_hv_uvmem.c

   157	
 > 158	bool kvmppc_uvmem_available(void)
   159	{
   160		/*
   161		 * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
   162		 * and our data structures have been initialized successfully.
   163		 */
   164		return !!kvmppc_uvmem_bitmap;
   165	}
   166	
   167	int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
   168	{
   169		struct kvmppc_uvmem_slot *p;
   170	
   171		p = kzalloc(sizeof(*p), GFP_KERNEL);
   172		if (!p)
   173			return -ENOMEM;
   174		p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns)));
   175		if (!p->pfns) {
   176			kfree(p);
   177			return -ENOMEM;
   178		}
   179		p->nr_pfns = slot->npages;
   180		p->base_pfn = slot->base_gfn;
   181	
   182		mutex_lock(&kvm->arch.uvmem_lock);
   183		list_add(&p->list, &kvm->arch.uvmem_pfns);
   184		mutex_unlock(&kvm->arch.uvmem_lock);
   185	
   186		return 0;
   187	}
   188	
   189	/*
   190	 * All device PFNs are already released by the time we come here.
   191	 */
   192	void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
   193	{
   194		struct kvmppc_uvmem_slot *p, *next;
   195	
   196		mutex_lock(&kvm->arch.uvmem_lock);
   197		list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
   198			if (p->base_pfn == slot->base_gfn) {
   199				vfree(p->pfns);
   200				list_del(&p->list);
   201				kfree(p);
   202				break;
   203			}
   204		}
   205		mutex_unlock(&kvm->arch.uvmem_lock);
   206	}
   207	
   208	static void kvmppc_uvmem_pfn_insert(unsigned long gfn, unsigned long uvmem_pfn,
   209					    struct kvm *kvm)
   210	{
   211		struct kvmppc_uvmem_slot *p;
   212	
   213		list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
   214			if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
   215				unsigned long index = gfn - p->base_pfn;
   216	
   217				p->pfns[index] = uvmem_pfn | KVMPPC_UVMEM_PFN;
   218				return;
   219			}
   220		}
   221	}
   222	
   223	static void kvmppc_uvmem_pfn_remove(unsigned long gfn, struct kvm *kvm)
   224	{
   225		struct kvmppc_uvmem_slot *p;
   226	
   227		list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
   228			if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
   229				/*
   230				 * Reset everything, but keep the KVMPPC_UVMEM_SHARED
   231				 * flag intact.  A gfn continues to be shared or
   232				 * unshared, with or without an associated device pfn.
   233				 */
   234				p->pfns[gfn - p->base_pfn] &= KVMPPC_UVMEM_SHARED;
   235				return;
   236			}
   237		}
   238	}
   239	
   240	static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
   241					    unsigned long *uvmem_pfn)
   242	{
   243		struct kvmppc_uvmem_slot *p;
   244	
   245		list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
   246			if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
   247				unsigned long index = gfn - p->base_pfn;
   248	
   249				if (p->pfns[index] & KVMPPC_UVMEM_PFN) {
   250					if (uvmem_pfn)
   251						*uvmem_pfn = p->pfns[index] &
   252							     KVMPPC_UVMEM_PFN_MASK;
   253					return true;
   254				} else
   255					return false;
   256			}
   257		}
   258		return false;
   259	}
   260	
   261	static void kvmppc_gfn_uvmem_shared(unsigned long gfn, struct kvm *kvm,
   262			bool set)
   263	{
   264		struct kvmppc_uvmem_slot *p;
   265	
   266		list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
   267			if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
   268				unsigned long index = gfn - p->base_pfn;
   269	
   270				if (set)
   271					p->pfns[index] |= KVMPPC_UVMEM_SHARED;
   272				else
   273					p->pfns[index] &= ~KVMPPC_UVMEM_SHARED;
   274				return;
   275			}
   276		}
   277	}
   278	
 > 279	bool kvmppc_gfn_is_uvmem_shared(unsigned long gfn, struct kvm *kvm)
   280	{
   281		struct kvmppc_uvmem_slot *p;
   282	
   283		list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
   284			if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
   285				unsigned long index = gfn - p->base_pfn;
   286	
   287				return (p->pfns[index] & KVMPPC_UVMEM_SHARED);
   288			}
   289		}
   290		return false;
   291	}
   292	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Laurent Dufour June 5, 2020, 9:48 a.m. UTC | #2
Le 31/05/2020 à 04:27, Ram Pai a écrit :
> During the life of SVM, its GFNs can transition from secure to shared
> state and vice-versa. Since the kernel does not track GFNs that are
> shared, it is not possible to disambiguate a shared GFN from a GFN whose
> PFN has not yet been migrated to a device-PFN.
> 
> The ability to identify a shared GFN is needed to skip migrating its PFN
> to device PFN. This functionality is leveraged in a subsequent patch.
> 
> Add the ability to identify the state of a GFN.
> 
> Cc: Paul Mackerras <paulus@ozlabs.org>
> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
> Cc: Michael Ellerman <mpe@ellerman.id.au>
> Cc: Bharata B Rao <bharata@linux.ibm.com>
> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> Cc: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
> Cc: Laurent Dufour <ldufour@linux.ibm.com>
> Cc: Thiago Jung Bauermann <bauerman@linux.ibm.com>
> Cc: David Gibson <david@gibson.dropbear.id.au>
> Cc: Claudio Carvalho <cclaudio@linux.ibm.com>
> Cc: kvm-ppc@vger.kernel.org
> Cc: linuxppc-dev@lists.ozlabs.org
> Reviewed-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
> Signed-off-by: Ram Pai <linuxram@us.ibm.com>
> ---
>   arch/powerpc/include/asm/kvm_book3s_uvmem.h |   6 +-
>   arch/powerpc/kvm/book3s_64_mmu_radix.c      |   2 +-
>   arch/powerpc/kvm/book3s_hv.c                |   2 +-
>   arch/powerpc/kvm/book3s_hv_uvmem.c          | 115 ++++++++++++++++++++++++++--
>   4 files changed, 113 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
> index 5a9834e..f0c5708 100644
> --- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h
> +++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
> @@ -21,7 +21,8 @@ unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
>   int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
>   unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm);
>   void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
> -			     struct kvm *kvm, bool skip_page_out);
> +			     struct kvm *kvm, bool skip_page_out,
> +			     bool purge_gfn);
>   #else
>   static inline int kvmppc_uvmem_init(void)
>   {
> @@ -75,6 +76,7 @@ static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
>   
>   static inline void
>   kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
> -			struct kvm *kvm, bool skip_page_out) { }
> +			struct kvm *kvm, bool skip_page_out,
> +			bool purge_gfn) { }
>   #endif /* CONFIG_PPC_UV */
>   #endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
> diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> index 803940d..3448459 100644
> --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
> +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> @@ -1100,7 +1100,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
>   	unsigned int shift;
>   
>   	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
> -		kvmppc_uvmem_drop_pages(memslot, kvm, true);
> +		kvmppc_uvmem_drop_pages(memslot, kvm, true, false);

Why is purge_gfn false here?
That function is called when dropping a hot-plugged memslot.
That being said, when called by kvmppc_core_commit_memory_region_hv(), the
memslot is then freed by kvmppc_uvmem_slot_free(), so the shared state will
not remain long — but there is a window...

>   
>   	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
>   		return;
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 103d13e..4c62bfe 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -5467,7 +5467,7 @@ static int kvmhv_svm_off(struct kvm *kvm)
>   			continue;
>   
>   		kvm_for_each_memslot(memslot, slots) {
> -			kvmppc_uvmem_drop_pages(memslot, kvm, true);
> +			kvmppc_uvmem_drop_pages(memslot, kvm, true, true);
>   			uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
>   		}
>   	}
> diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
> index ea4a1f1..2ef1e03 100644
> --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> @@ -99,14 +99,56 @@
>   static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
>   
>   #define KVMPPC_UVMEM_PFN	(1UL << 63)
> +#define KVMPPC_UVMEM_SHARED	(1UL << 62)
> +#define KVMPPC_UVMEM_FLAG_MASK	(KVMPPC_UVMEM_PFN | KVMPPC_UVMEM_SHARED)
> +#define KVMPPC_UVMEM_PFN_MASK	(~KVMPPC_UVMEM_FLAG_MASK)
>   
>   struct kvmppc_uvmem_slot {
>   	struct list_head list;
>   	unsigned long nr_pfns;
>   	unsigned long base_pfn;
> +	/*
> +	 * pfns array has an entry for each GFN of the memory slot.
> +	 *
> +	 * The GFN can be in one of the following states.
> +	 *
> +	 * (a) Secure - The GFN is secure. Only Ultravisor can access it.
> +	 * (b) Shared - The GFN is shared. Both Hypervisor and Ultravisor
> +	 *		can access it.
> +	 * (c) Normal - The GFN is normal.  Only Hypervisor can access it.
> +	 *
> +	 * Secure GFN is associated with a devicePFN. Its pfn[] has the
> +	 * KVMPPC_UVMEM_PFN flag set, the KVMPPC_UVMEM_SHARED flag unset,
> +	 * and carries the value of the device PFN.
> +	 *
> +	 * Shared GFN is associated with a memoryPFN. Its pfn[] has
> +	 * KVMPPC_UVMEM_SHARED flag set. But its KVMPPC_UVMEM_PFN is not set,
> +	 * and there is no PFN value stored.
> +	 *
> +	 * Normal GFN is not associated with memoryPFN. Its pfn[] has
> +	 * KVMPPC_UVMEM_SHARED and KVMPPC_UVMEM_PFN flag unset, and no PFN
> +	 * value is stored.
> +	 *
> +	 * Any other combination of values in pfn[] leads to undefined
> +	 * behavior.
> +	 *
> +	 * Life cycle of a GFN --
> +	 *
> +	 * ---------------------------------------------------------
> +	 * |        |     Share	 |  Unshare | SVM	|slot      |
> +	 * |	    |		 |	    | abort/	|flush	   |
> +	 * |	    |		 |	    | terminate	|	   |
> +	 * ---------------------------------------------------------
> +	 * |        |            |          |           |          |
> +	 * | Secure |     Shared | Secure   |Normal	|Secure    |
> +	 * |        |            |          |           |          |
> +	 * | Shared |     Shared | Secure   |Normal     |Shared    |
> +	 * |        |            |          |           |          |
> +	 * | Normal |     Shared | Secure   |Normal     |Normal    |
> +	 * ---------------------------------------------------------
> +	 */
>   	unsigned long *pfns;
>   };
> -
>   struct kvmppc_uvmem_page_pvt {
>   	struct kvm *kvm;
>   	unsigned long gpa;
> @@ -175,7 +217,12 @@ static void kvmppc_uvmem_pfn_remove(unsigned long gfn, struct kvm *kvm)
>   
>   	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
>   		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
> -			p->pfns[gfn - p->base_pfn] = 0;
> +			/*
> +			 * Reset everything, but keep the KVMPPC_UVMEM_SHARED
> +			 * flag intact.  A gfn continues to be shared or
> +			 * unshared, with or without an associated device pfn.
> +			 */
> +			p->pfns[gfn - p->base_pfn] &= KVMPPC_UVMEM_SHARED;
>   			return;
>   		}
>   	}
> @@ -193,7 +240,7 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
>   			if (p->pfns[index] & KVMPPC_UVMEM_PFN) {
>   				if (uvmem_pfn)
>   					*uvmem_pfn = p->pfns[index] &
> -						     ~KVMPPC_UVMEM_PFN;
> +						     KVMPPC_UVMEM_PFN_MASK;
>   				return true;
>   			} else
>   				return false;
> @@ -202,6 +249,38 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
>   	return false;
>   }
>   
> +static void kvmppc_gfn_uvmem_shared(unsigned long gfn, struct kvm *kvm,
> +		bool set)
> +{
> +	struct kvmppc_uvmem_slot *p;
> +
> +	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
> +		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
> +			unsigned long index = gfn - p->base_pfn;
> +
> +			if (set)
> +				p->pfns[index] |= KVMPPC_UVMEM_SHARED;
> +			else
> +				p->pfns[index] &= ~KVMPPC_UVMEM_SHARED;
> +			return;
> +		}
> +	}
> +}
> +
> +bool kvmppc_gfn_is_uvmem_shared(unsigned long gfn, struct kvm *kvm)
> +{
> +	struct kvmppc_uvmem_slot *p;
> +
> +	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
> +		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
> +			unsigned long index = gfn - p->base_pfn;
> +
> +			return (p->pfns[index] & KVMPPC_UVMEM_SHARED);
> +		}
> +	}
> +	return false;
> +}
> +
>   unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
>   {
>   	struct kvm_memslots *slots;
> @@ -256,9 +335,13 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
>    * is HV side fault on these pages. Next we *get* these pages, forcing
>    * fault on them, do fault time migration to replace the device PTEs in
>    * QEMU page table with normal PTEs from newly allocated pages.
> + *
> + * If @purge_gfn is set, clean up any information related to each of
> + * the GFNs associated with this memory slot.
>    */
>   void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
> -			     struct kvm *kvm, bool skip_page_out)
> +			     struct kvm *kvm, bool skip_page_out,
> +			     bool purge_gfn)
>   {
>   	int i;
>   	struct kvmppc_uvmem_page_pvt *pvt;
> @@ -269,11 +352,22 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
>   		struct page *uvmem_page;
>   
>   		mutex_lock(&kvm->arch.uvmem_lock);
> +
> +		if (purge_gfn) {
> +			/*
> +			 * cleanup the shared status of the GFN here.
> +			 * Any device PFN associated with the GFN shall
> +			 * be cleaned up later, in kvmppc_uvmem_page_free()
> +			 * when the device PFN is actually disassociated
> +			 * from the GFN.
> +			 */
> +			kvmppc_gfn_uvmem_shared(gfn, kvm, false);
> +		}
> +
>   		if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
>   			mutex_unlock(&kvm->arch.uvmem_lock);
>   			continue;
>   		}
> -
>   		uvmem_page = pfn_to_page(uvmem_pfn);
>   		pvt = uvmem_page->zone_device_data;
>   		pvt->skip_page_out = skip_page_out;
> @@ -304,7 +398,7 @@ unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
>   	srcu_idx = srcu_read_lock(&kvm->srcu);
>   
>   	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
> -		kvmppc_uvmem_drop_pages(memslot, kvm, false);
> +		kvmppc_uvmem_drop_pages(memslot, kvm, false, true);
>   
>   	srcu_read_unlock(&kvm->srcu, srcu_idx);
>   
> @@ -470,8 +564,11 @@ static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
>   		goto retry;
>   	}
>   
> -	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift))
> +	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
> +				page_shift)) {
> +		kvmppc_gfn_uvmem_shared(gfn, kvm, true);
>   		ret = H_SUCCESS;
> +	}
>   	kvm_release_pfn_clean(pfn);
>   	mutex_unlock(&kvm->arch.uvmem_lock);
>   out:
> @@ -527,8 +624,10 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
>   		goto out_unlock;
>   
>   	if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
> -				&downgrade))
> +				&downgrade)) {
> +		kvmppc_gfn_uvmem_shared(gfn, kvm, false);
>   		ret = H_SUCCESS;
> +	}
>   out_unlock:
>   	mutex_unlock(&kvm->arch.uvmem_lock);
>   out:
>
Ram Pai June 5, 2020, 2:38 p.m. UTC | #3
> >diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> >index 803940d..3448459 100644
> >--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
> >+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
> >@@ -1100,7 +1100,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
> >  	unsigned int shift;
> >  	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
> >-		kvmppc_uvmem_drop_pages(memslot, kvm, true);
> >+		kvmppc_uvmem_drop_pages(memslot, kvm, true, false);
> 
> Why is purge_gfn false here?
> That function is called when dropping a hot-plugged memslot.

This function does not know under what context it is called. Since
its job is just to flush the memslot, it cannot assume anything
about purging the pages in the memslot.

.snip..


RP
diff mbox series

Patch

diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
index 5a9834e..f0c5708 100644
--- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h
+++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -21,7 +21,8 @@  unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
 unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm);
 void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
-			     struct kvm *kvm, bool skip_page_out);
+			     struct kvm *kvm, bool skip_page_out,
+			     bool purge_gfn);
 #else
 static inline int kvmppc_uvmem_init(void)
 {
@@ -75,6 +76,7 @@  static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
 
 static inline void
 kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
-			struct kvm *kvm, bool skip_page_out) { }
+			struct kvm *kvm, bool skip_page_out,
+			bool purge_gfn) { }
 #endif /* CONFIG_PPC_UV */
 #endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 803940d..3448459 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -1100,7 +1100,7 @@  void kvmppc_radix_flush_memslot(struct kvm *kvm,
 	unsigned int shift;
 
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
-		kvmppc_uvmem_drop_pages(memslot, kvm, true);
+		kvmppc_uvmem_drop_pages(memslot, kvm, true, false);
 
 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 103d13e..4c62bfe 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -5467,7 +5467,7 @@  static int kvmhv_svm_off(struct kvm *kvm)
 			continue;
 
 		kvm_for_each_memslot(memslot, slots) {
-			kvmppc_uvmem_drop_pages(memslot, kvm, true);
+			kvmppc_uvmem_drop_pages(memslot, kvm, true, true);
 			uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
 		}
 	}
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index ea4a1f1..2ef1e03 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -99,14 +99,56 @@ 
 static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
 
 #define KVMPPC_UVMEM_PFN	(1UL << 63)
+#define KVMPPC_UVMEM_SHARED	(1UL << 62)
+#define KVMPPC_UVMEM_FLAG_MASK	(KVMPPC_UVMEM_PFN | KVMPPC_UVMEM_SHARED)
+#define KVMPPC_UVMEM_PFN_MASK	(~KVMPPC_UVMEM_FLAG_MASK)
 
 struct kvmppc_uvmem_slot {
 	struct list_head list;
 	unsigned long nr_pfns;
 	unsigned long base_pfn;
+	/*
+	 * pfns array has an entry for each GFN of the memory slot.
+	 *
+	 * The GFN can be in one of the following states.
+	 *
+	 * (a) Secure - The GFN is secure. Only Ultravisor can access it.
+	 * (b) Shared - The GFN is shared. Both Hypervisor and Ultravisor
+	 *		can access it.
+	 * (c) Normal - The GFN is normal.  Only Hypervisor can access it.
+	 *
+	 * Secure GFN is associated with a devicePFN. Its pfn[] has the
+	 * KVMPPC_UVMEM_PFN flag set, the KVMPPC_UVMEM_SHARED flag unset,
+	 * and carries the value of the device PFN.
+	 *
+	 * Shared GFN is associated with a memoryPFN. Its pfn[] has
+	 * KVMPPC_UVMEM_SHARED flag set. But its KVMPPC_UVMEM_PFN is not set,
+	 * and there is no PFN value stored.
+	 *
+	 * Normal GFN is not associated with memoryPFN. Its pfn[] has
+	 * KVMPPC_UVMEM_SHARED and KVMPPC_UVMEM_PFN flag unset, and no PFN
+	 * value is stored.
+	 *
+	 * Any other combination of values in pfn[] leads to undefined
+	 * behavior.
+	 *
+	 * Life cycle of a GFN --
+	 *
+	 * ---------------------------------------------------------
+	 * |        |     Share	 |  Unshare | SVM	|slot      |
+	 * |	    |		 |	    | abort/	|flush	   |
+	 * |	    |		 |	    | terminate	|	   |
+	 * ---------------------------------------------------------
+	 * |        |            |          |           |          |
+	 * | Secure |     Shared | Secure   |Normal	|Secure    |
+	 * |        |            |          |           |          |
+	 * | Shared |     Shared | Secure   |Normal     |Shared    |
+	 * |        |            |          |           |          |
+	 * | Normal |     Shared | Secure   |Normal     |Normal    |
+	 * ---------------------------------------------------------
+	 */
 	unsigned long *pfns;
 };
-
 struct kvmppc_uvmem_page_pvt {
 	struct kvm *kvm;
 	unsigned long gpa;
@@ -175,7 +217,12 @@  static void kvmppc_uvmem_pfn_remove(unsigned long gfn, struct kvm *kvm)
 
 	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
 		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
-			p->pfns[gfn - p->base_pfn] = 0;
+			/*
+			 * Reset everything, but keep the KVMPPC_UVMEM_SHARED
+			 * flag intact.  A gfn continues to be shared or
+			 * unshared, with or without an associated device pfn.
+			 */
+			p->pfns[gfn - p->base_pfn] &= KVMPPC_UVMEM_SHARED;
 			return;
 		}
 	}
@@ -193,7 +240,7 @@  static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
 			if (p->pfns[index] & KVMPPC_UVMEM_PFN) {
 				if (uvmem_pfn)
 					*uvmem_pfn = p->pfns[index] &
-						     ~KVMPPC_UVMEM_PFN;
+						     KVMPPC_UVMEM_PFN_MASK;
 				return true;
 			} else
 				return false;
@@ -202,6 +249,38 @@  static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
 	return false;
 }
 
+static void kvmppc_gfn_uvmem_shared(unsigned long gfn, struct kvm *kvm,
+		bool set)
+{
+	struct kvmppc_uvmem_slot *p;
+
+	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
+		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
+			unsigned long index = gfn - p->base_pfn;
+
+			if (set)
+				p->pfns[index] |= KVMPPC_UVMEM_SHARED;
+			else
+				p->pfns[index] &= ~KVMPPC_UVMEM_SHARED;
+			return;
+		}
+	}
+}
+
+bool kvmppc_gfn_is_uvmem_shared(unsigned long gfn, struct kvm *kvm)
+{
+	struct kvmppc_uvmem_slot *p;
+
+	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
+		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
+			unsigned long index = gfn - p->base_pfn;
+
+			return (p->pfns[index] & KVMPPC_UVMEM_SHARED);
+		}
+	}
+	return false;
+}
+
 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
 {
 	struct kvm_memslots *slots;
@@ -256,9 +335,13 @@  unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
  * is HV side fault on these pages. Next we *get* these pages, forcing
  * fault on them, do fault time migration to replace the device PTEs in
  * QEMU page table with normal PTEs from newly allocated pages.
+ *
+ * If @purge_gfn is set, clean up any information related to each of
+ * the GFNs associated with this memory slot.
  */
 void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
-			     struct kvm *kvm, bool skip_page_out)
+			     struct kvm *kvm, bool skip_page_out,
+			     bool purge_gfn)
 {
 	int i;
 	struct kvmppc_uvmem_page_pvt *pvt;
@@ -269,11 +352,22 @@  void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
 		struct page *uvmem_page;
 
 		mutex_lock(&kvm->arch.uvmem_lock);
+
+		if (purge_gfn) {
+			/*
+			 * cleanup the shared status of the GFN here.
+			 * Any device PFN associated with the GFN shall
+			 * be cleaned up later, in kvmppc_uvmem_page_free()
+			 * when the device PFN is actually disassociated
+			 * from the GFN.
+			 */
+			kvmppc_gfn_uvmem_shared(gfn, kvm, false);
+		}
+
 		if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
 			mutex_unlock(&kvm->arch.uvmem_lock);
 			continue;
 		}
-
 		uvmem_page = pfn_to_page(uvmem_pfn);
 		pvt = uvmem_page->zone_device_data;
 		pvt->skip_page_out = skip_page_out;
@@ -304,7 +398,7 @@  unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 
 	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
-		kvmppc_uvmem_drop_pages(memslot, kvm, false);
+		kvmppc_uvmem_drop_pages(memslot, kvm, false, true);
 
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 
@@ -470,8 +564,11 @@  static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
 		goto retry;
 	}
 
-	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift))
+	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
+				page_shift)) {
+		kvmppc_gfn_uvmem_shared(gfn, kvm, true);
 		ret = H_SUCCESS;
+	}
 	kvm_release_pfn_clean(pfn);
 	mutex_unlock(&kvm->arch.uvmem_lock);
 out:
@@ -527,8 +624,10 @@  unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 		goto out_unlock;
 
 	if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
-				&downgrade))
+				&downgrade)) {
+		kvmppc_gfn_uvmem_shared(gfn, kvm, false);
 		ret = H_SUCCESS;
+	}
 out_unlock:
 	mutex_unlock(&kvm->arch.uvmem_lock);
 out: