[RFC,v1,2/4] kvmppc: Add support for shared pages in HMM driver

Message ID 20181022051837.1165-3-bharata@linux.ibm.com (mailing list archive)
State New, archived
Series kvmppc: HMM backend driver to manage pages of secure guest

Commit Message

Bharata B Rao Oct. 22, 2018, 5:18 a.m. UTC
A secure guest will share some of its pages with the hypervisor (e.g. virtio
bounce buffers). Support shared pages in the HMM driver.

Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
---
 arch/powerpc/kvm/book3s_hv_hmm.c | 69 ++++++++++++++++++++++++++++++--
 1 file changed, 65 insertions(+), 4 deletions(-)

Comments

Paul Mackerras Oct. 30, 2018, 5:26 a.m. UTC | #1
On Mon, Oct 22, 2018 at 10:48:35AM +0530, Bharata B Rao wrote:
> A secure guest will share some of its pages with the hypervisor (e.g. virtio
> bounce buffers). Support shared pages in the HMM driver.
> 
> Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>

Comments below...

> ---
>  arch/powerpc/kvm/book3s_hv_hmm.c | 69 ++++++++++++++++++++++++++++++--
>  1 file changed, 65 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv_hmm.c b/arch/powerpc/kvm/book3s_hv_hmm.c
> index a2ee3163a312..09b8e19b7605 100644
> --- a/arch/powerpc/kvm/book3s_hv_hmm.c
> +++ b/arch/powerpc/kvm/book3s_hv_hmm.c
> @@ -50,6 +50,7 @@ struct kvmppc_hmm_page_pvt {
>  	struct hlist_head *hmm_hash;
>  	unsigned int lpid;
>  	unsigned long gpa;
> +	bool skip_page_out;
>  };
>  
>  struct kvmppc_hmm_migrate_args {
> @@ -278,6 +279,65 @@ static unsigned long kvmppc_gpa_to_hva(struct kvm *kvm, unsigned long gpa,
>  	return hva;
>  }
>  
> +/*
> + * Shares the page with HV, thus making it a normal page.
> + *
> + * - If the page is already secure, then provision a new page and share
> + * - If the page is a normal page, share the existing page
> + *
> + * In the former case, uses the HMM fault handler to release the HMM page.
> + */
> +static unsigned long
> +kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
> +		  unsigned long addr, unsigned long page_shift)
> +{
> +
> +	int ret;
> +	struct hlist_head *list, *hmm_hash;
> +	unsigned int lpid = kvm->arch.lpid;
> +	unsigned long flags;
> +	struct kvmppc_hmm_pfn_entry *p;
> +	struct page *hmm_page, *page;
> +	struct kvmppc_hmm_page_pvt *pvt;
> +	unsigned long pfn;
> +
> +	/*
> +	 * First check if the requested page has already been given to
> +	 * UV as a secure page. If so, ensure that we don't issue a
> +	 * UV_PAGE_OUT but instead directly send the page
> +	 */
> +	spin_lock_irqsave(&kvmppc_hmm_lock, flags);
> +	hmm_hash = kvm->arch.hmm_hash;
> +	list = &hmm_hash[kvmppc_hmm_pfn_hash_fn(gpa)];
> +	hlist_for_each_entry(p, list, hlist) {
> +		if (p->addr == gpa) {
> +			hmm_page = pfn_to_page(p->hmm_pfn);
> +			get_page(hmm_page); /* TODO: Necessary ? */
> +			pvt = (struct kvmppc_hmm_page_pvt *)
> +				hmm_devmem_page_get_drvdata(hmm_page);
> +			pvt->skip_page_out = true;
> +			put_page(hmm_page);
> +			break;
> +		}
> +	}
> +	spin_unlock_irqrestore(&kvmppc_hmm_lock, flags);
> +
> +	ret = get_user_pages_fast(addr, 1, 0, &page);

Why are we calling this with write==0?  Surely in general the secure
guest will expect to be able to write to the shared page?

Also, in general get_user_pages_fast isn't sufficient to translate a
host virtual address (derived from a guest real address) into a pfn.
See for example hva_to_pfn() in virt/kvm/kvm_main.c and the things it
does to cope with the various cases that one can hit.  I can imagine
in future that the secure guest might want to establish a shared
mapping to a PCI device, for instance.

> +	if (ret != 1)
> +		return H_PARAMETER;
> +
> +	pfn = page_to_pfn(page);
> +	if (is_zero_pfn(pfn)) {
> +		put_page(page);
> +		return H_SUCCESS;
> +	}

The ultravisor still needs a page to map into the guest in this case,
doesn't it?  What's the point of returning without giving the
ultravisor a page to use?

> +
> +	ret = uv_page_in(lpid, pfn << page_shift, gpa, 0, page_shift);
> +	put_page(page);
> +
> +	return (ret == U_SUCCESS) ? H_SUCCESS : H_PARAMETER;
> +}
> +
>  /*
>   * Move page from normal memory to secure memory.
>   */
> @@ -300,8 +360,8 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
>  		return H_PARAMETER;
>  	end = addr + (1UL << page_shift);
>  
> -	if (flags)
> -		return H_P2;
> +	if (flags & H_PAGE_IN_SHARED)
> +		return kvmppc_share_page(kvm, gpa, addr, page_shift);

Would be best to fail if any unknown flags are set, I would think.
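
I.e. something along these lines (sketch only, assuming H_PAGE_IN_SHARED
stays the only defined flag and reusing the existing H_P2 return):

	if (flags & ~H_PAGE_IN_SHARED)
		return H_P2;

	if (flags & H_PAGE_IN_SHARED)
		return kvmppc_share_page(kvm, gpa, addr, page_shift);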

>  
>  	args.hmm_hash = kvm->arch.hmm_hash;
>  	args.lpid = kvm->arch.lpid;
> @@ -349,8 +409,9 @@ kvmppc_hmm_fault_migrate_alloc_and_copy(struct vm_area_struct *vma,
>  	       hmm_devmem_page_get_drvdata(spage);
>  
>  	pfn = page_to_pfn(dpage);
> -	ret = uv_page_out(pvt->lpid, pfn << PAGE_SHIFT,
> -			  pvt->gpa, 0, PAGE_SHIFT);
> +	if (!pvt->skip_page_out)
> +		ret = uv_page_out(pvt->lpid, pfn << PAGE_SHIFT,
> +				  pvt->gpa, 0, PAGE_SHIFT);
>  	if (ret == U_SUCCESS)
>  		*dst_pfn = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
>  }
> -- 
> 2.17.1

Paul.
Education Directorate Nov. 1, 2018, 10:45 a.m. UTC | #2
On Mon, Oct 22, 2018 at 10:48:35AM +0530, Bharata B Rao wrote:
> A secure guest will share some of its pages with the hypervisor (e.g. virtio
> bounce buffers). Support shared pages in the HMM driver.
> 
> Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
> ---
>  arch/powerpc/kvm/book3s_hv_hmm.c | 69 ++++++++++++++++++++++++++++++--
>  1 file changed, 65 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/powerpc/kvm/book3s_hv_hmm.c b/arch/powerpc/kvm/book3s_hv_hmm.c
> index a2ee3163a312..09b8e19b7605 100644
> --- a/arch/powerpc/kvm/book3s_hv_hmm.c
> +++ b/arch/powerpc/kvm/book3s_hv_hmm.c
> @@ -50,6 +50,7 @@ struct kvmppc_hmm_page_pvt {
>  	struct hlist_head *hmm_hash;
>  	unsigned int lpid;
>  	unsigned long gpa;
> +	bool skip_page_out;
>  };
>  
>  struct kvmppc_hmm_migrate_args {
> @@ -278,6 +279,65 @@ static unsigned long kvmppc_gpa_to_hva(struct kvm *kvm, unsigned long gpa,
>  	return hva;
>  }
>  
> +/*
> + * Shares the page with HV, thus making it a normal page.
> + *
> + * - If the page is already secure, then provision a new page and share
> + * - If the page is a normal page, share the existing page
> + *
> + * In the former case, uses the HMM fault handler to release the HMM page.
> + */
> +static unsigned long
> +kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
> +		  unsigned long addr, unsigned long page_shift)
> +{
> +

So this is a special flag passed via the hypercall to say
this page can be skipped from page_out from secure memory?
Who has the master copy of the page at this point?

In which case the question is

Why did we get a fault on the page which resulted in the
fault migration ops being called?
What category of pages are considered shared?

Balbir
Bharata B Rao Nov. 12, 2018, 9:38 a.m. UTC | #3
On Tue, Oct 30, 2018 at 04:26:46PM +1100, Paul Mackerras wrote:
> On Mon, Oct 22, 2018 at 10:48:35AM +0530, Bharata B Rao wrote:
> > A secure guest will share some of its pages with the hypervisor (e.g. virtio
> > bounce buffers). Support shared pages in the HMM driver.
> > 
> > Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
> 
> Comments below...
> 
> > ---
> >  arch/powerpc/kvm/book3s_hv_hmm.c | 69 ++++++++++++++++++++++++++++++--
> >  1 file changed, 65 insertions(+), 4 deletions(-)
> > 
> > diff --git a/arch/powerpc/kvm/book3s_hv_hmm.c b/arch/powerpc/kvm/book3s_hv_hmm.c
> > index a2ee3163a312..09b8e19b7605 100644
> > --- a/arch/powerpc/kvm/book3s_hv_hmm.c
> > +++ b/arch/powerpc/kvm/book3s_hv_hmm.c
> > @@ -50,6 +50,7 @@ struct kvmppc_hmm_page_pvt {
> >  	struct hlist_head *hmm_hash;
> >  	unsigned int lpid;
> >  	unsigned long gpa;
> > +	bool skip_page_out;
> >  };
> >  
> >  struct kvmppc_hmm_migrate_args {
> > @@ -278,6 +279,65 @@ static unsigned long kvmppc_gpa_to_hva(struct kvm *kvm, unsigned long gpa,
> >  	return hva;
> >  }
> >  
> > +/*
> > + * Shares the page with HV, thus making it a normal page.
> > + *
> > + * - If the page is already secure, then provision a new page and share
> > + * - If the page is a normal page, share the existing page
> > + *
> > + * In the former case, uses the HMM fault handler to release the HMM page.
> > + */
> > +static unsigned long
> > +kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
> > +		  unsigned long addr, unsigned long page_shift)
> > +{
> > +
> > +	int ret;
> > +	struct hlist_head *list, *hmm_hash;
> > +	unsigned int lpid = kvm->arch.lpid;
> > +	unsigned long flags;
> > +	struct kvmppc_hmm_pfn_entry *p;
> > +	struct page *hmm_page, *page;
> > +	struct kvmppc_hmm_page_pvt *pvt;
> > +	unsigned long pfn;
> > +
> > +	/*
> > +	 * First check if the requested page has already been given to
> > +	 * UV as a secure page. If so, ensure that we don't issue a
> > +	 * UV_PAGE_OUT but instead directly send the page
> > +	 */
> > +	spin_lock_irqsave(&kvmppc_hmm_lock, flags);
> > +	hmm_hash = kvm->arch.hmm_hash;
> > +	list = &hmm_hash[kvmppc_hmm_pfn_hash_fn(gpa)];
> > +	hlist_for_each_entry(p, list, hlist) {
> > +		if (p->addr == gpa) {
> > +			hmm_page = pfn_to_page(p->hmm_pfn);
> > +			get_page(hmm_page); /* TODO: Necessary ? */
> > +			pvt = (struct kvmppc_hmm_page_pvt *)
> > +				hmm_devmem_page_get_drvdata(hmm_page);
> > +			pvt->skip_page_out = true;
> > +			put_page(hmm_page);
> > +			break;
> > +		}
> > +	}
> > +	spin_unlock_irqrestore(&kvmppc_hmm_lock, flags);
> > +
> > +	ret = get_user_pages_fast(addr, 1, 0, &page);
> 
> Why are we calling this with write==0?  Surely in general the secure
> guest will expect to be able to write to the shared page?
> 
> Also, in general get_user_pages_fast isn't sufficient to translate a
> host virtual address (derived from a guest real address) into a pfn.
> See for example hva_to_pfn() in virt/kvm/kvm_main.c and the things it
> does to cope with the various cases that one can hit.  I can imagine
> in future that the secure guest might want to establish a shared
> mapping to a PCI device, for instance.

I switched to using gfn_to_pfn() which should cover all the cases.

> 
> > +	if (ret != 1)
> > +		return H_PARAMETER;
> > +
> > +	pfn = page_to_pfn(page);
> > +	if (is_zero_pfn(pfn)) {
> > +		put_page(page);
> > +		return H_SUCCESS;
> > +	}
> 
> The ultravisor still needs a page to map into the guest in this case,
> doesn't it?  What's the point of returning without giving the
> ultravisor a page to use?

Yes, missed it.
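
For reference, a gfn_to_pfn()-based lookup could look roughly like this
(sketch only, not the actual respin; the error handling and the pfn release
call are assumptions). Since gfn_to_pfn() resolves the gfn with a write
fault, it should also not hand back the zero page for ordinary guest RAM,
so the early-return special case can simply go away:

	gfn_t gfn = gpa >> page_shift;
	kvm_pfn_t pfn;

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return H_PARAMETER;

	/* Always give the UV a real page to map for the shared GPA */
	ret = uv_page_in(lpid, pfn << page_shift, gpa, 0, page_shift);
	kvm_release_pfn_dirty(pfn);

	return (ret == U_SUCCESS) ? H_SUCCESS : H_PARAMETER;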

Regards,
Bharata.
Bharata B Rao Nov. 12, 2018, 10:07 a.m. UTC | #4
On Thu, Nov 01, 2018 at 09:45:52PM +1100, Balbir Singh wrote:
> On Mon, Oct 22, 2018 at 10:48:35AM +0530, Bharata B Rao wrote:
> > A secure guest will share some of its pages with the hypervisor (e.g. virtio
> > bounce buffers). Support shared pages in the HMM driver.
> > 
> > Signed-off-by: Bharata B Rao <bharata@linux.ibm.com>
> > ---
> >  arch/powerpc/kvm/book3s_hv_hmm.c | 69 ++++++++++++++++++++++++++++++--
> >  1 file changed, 65 insertions(+), 4 deletions(-)
> > 
> > diff --git a/arch/powerpc/kvm/book3s_hv_hmm.c b/arch/powerpc/kvm/book3s_hv_hmm.c
> > index a2ee3163a312..09b8e19b7605 100644
> > --- a/arch/powerpc/kvm/book3s_hv_hmm.c
> > +++ b/arch/powerpc/kvm/book3s_hv_hmm.c
> > @@ -50,6 +50,7 @@ struct kvmppc_hmm_page_pvt {
> >  	struct hlist_head *hmm_hash;
> >  	unsigned int lpid;
> >  	unsigned long gpa;
> > +	bool skip_page_out;
> >  };
> >  
> >  struct kvmppc_hmm_migrate_args {
> > @@ -278,6 +279,65 @@ static unsigned long kvmppc_gpa_to_hva(struct kvm *kvm, unsigned long gpa,
> >  	return hva;
> >  }
> >  
> > +/*
> > + * Shares the page with HV, thus making it a normal page.
> > + *
> > + * - If the page is already secure, then provision a new page and share
> > + * - If the page is a normal page, share the existing page
> > + *
> > + * In the former case, uses the HMM fault handler to release the HMM page.
> > + */
> > +static unsigned long
> > +kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
> > +		  unsigned long addr, unsigned long page_shift)
> > +{
> > +
> 
> So this is a special flag passed via the hypercall to say
> this page can be skipped from page_out from secure memory?
> Who has the master copy of the page at this point?
> 
> In which case the question is
> 
> Why did we get a fault on the page which resulted in the
> fault migration ops being called?
> What category of pages are considered shared?

When the UV/guest asks for a page to be shared, there can be two cases:

- If the page is already secure, then provision a new page and share
- If the page is a normal page, share the existing page

In the former case, we touch the page via get_user_pages() and re-use the
HMM fault handler to release the HMM page. We use skip_page_out to mark
that this page is meant to be released without doing a page-out, which
would otherwise be done when the HV touches a secure page.

When a page is shared, both HV and UV have mappings to the same physical
page that resides in the non-secure memory.
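
Concretely, in the fault-path alloc_and_copy routine the flag only
suppresses the UV_PAGE_OUT (annotated sketch of that hunk; it assumes ret
is initialised to U_SUCCESS earlier in the function):

	pfn = page_to_pfn(dpage);

	/*
	 * The guest asked for this page to be shared, so it is released
	 * without the UV_PAGE_OUT that a HV touch of a secure page would
	 * otherwise trigger.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(pvt->lpid, pfn << PAGE_SHIFT,
				  pvt->gpa, 0, PAGE_SHIFT);

	if (ret == U_SUCCESS)
		*dst_pfn = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;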

Regards,
Bharata.

Patch

diff --git a/arch/powerpc/kvm/book3s_hv_hmm.c b/arch/powerpc/kvm/book3s_hv_hmm.c
index a2ee3163a312..09b8e19b7605 100644
--- a/arch/powerpc/kvm/book3s_hv_hmm.c
+++ b/arch/powerpc/kvm/book3s_hv_hmm.c
@@ -50,6 +50,7 @@  struct kvmppc_hmm_page_pvt {
 	struct hlist_head *hmm_hash;
 	unsigned int lpid;
 	unsigned long gpa;
+	bool skip_page_out;
 };
 
 struct kvmppc_hmm_migrate_args {
@@ -278,6 +279,65 @@  static unsigned long kvmppc_gpa_to_hva(struct kvm *kvm, unsigned long gpa,
 	return hva;
 }
 
+/*
+ * Shares the page with HV, thus making it a normal page.
+ *
+ * - If the page is already secure, then provision a new page and share
+ * - If the page is a normal page, share the existing page
+ *
+ * In the former case, uses the HMM fault handler to release the HMM page.
+ */
+static unsigned long
+kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
+		  unsigned long addr, unsigned long page_shift)
+{
+
+	int ret;
+	struct hlist_head *list, *hmm_hash;
+	unsigned int lpid = kvm->arch.lpid;
+	unsigned long flags;
+	struct kvmppc_hmm_pfn_entry *p;
+	struct page *hmm_page, *page;
+	struct kvmppc_hmm_page_pvt *pvt;
+	unsigned long pfn;
+
+	/*
+	 * First check if the requested page has already been given to
+	 * UV as a secure page. If so, ensure that we don't issue a
+	 * UV_PAGE_OUT but instead directly send the page
+	 */
+	spin_lock_irqsave(&kvmppc_hmm_lock, flags);
+	hmm_hash = kvm->arch.hmm_hash;
+	list = &hmm_hash[kvmppc_hmm_pfn_hash_fn(gpa)];
+	hlist_for_each_entry(p, list, hlist) {
+		if (p->addr == gpa) {
+			hmm_page = pfn_to_page(p->hmm_pfn);
+			get_page(hmm_page); /* TODO: Necessary ? */
+			pvt = (struct kvmppc_hmm_page_pvt *)
+				hmm_devmem_page_get_drvdata(hmm_page);
+			pvt->skip_page_out = true;
+			put_page(hmm_page);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&kvmppc_hmm_lock, flags);
+
+	ret = get_user_pages_fast(addr, 1, 0, &page);
+	if (ret != 1)
+		return H_PARAMETER;
+
+	pfn = page_to_pfn(page);
+	if (is_zero_pfn(pfn)) {
+		put_page(page);
+		return H_SUCCESS;
+	}
+
+	ret = uv_page_in(lpid, pfn << page_shift, gpa, 0, page_shift);
+	put_page(page);
+
+	return (ret == U_SUCCESS) ? H_SUCCESS : H_PARAMETER;
+}
+
 /*
  * Move page from normal memory to secure memory.
  */
@@ -300,8 +360,8 @@  kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
 		return H_PARAMETER;
 	end = addr + (1UL << page_shift);
 
-	if (flags)
-		return H_P2;
+	if (flags & H_PAGE_IN_SHARED)
+		return kvmppc_share_page(kvm, gpa, addr, page_shift);
 
 	args.hmm_hash = kvm->arch.hmm_hash;
 	args.lpid = kvm->arch.lpid;
@@ -349,8 +409,9 @@  kvmppc_hmm_fault_migrate_alloc_and_copy(struct vm_area_struct *vma,
 	       hmm_devmem_page_get_drvdata(spage);
 
 	pfn = page_to_pfn(dpage);
-	ret = uv_page_out(pvt->lpid, pfn << PAGE_SHIFT,
-			  pvt->gpa, 0, PAGE_SHIFT);
+	if (!pvt->skip_page_out)
+		ret = uv_page_out(pvt->lpid, pfn << PAGE_SHIFT,
+				  pvt->gpa, 0, PAGE_SHIFT);
 	if (ret == U_SUCCESS)
 		*dst_pfn = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
 }