diff mbox series

[v2,12/15] KVM: s390: move gmap_shadow_pgt_lookup() into kvm

Message ID 20250116113355.32184-13-imbrenda@linux.ibm.com (mailing list archive)
State New
Headers show
Series KVM: s390: Stop using page->index and other things | expand

Commit Message

Claudio Imbrenda Jan. 16, 2025, 11:33 a.m. UTC
Move gmap_shadow_pgt_lookup() from mm/gmap.c into kvm/gaccess.c .

Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
---
 arch/s390/include/asm/gmap.h |  3 +--
 arch/s390/kvm/gaccess.c      | 40 +++++++++++++++++++++++++++++++
 arch/s390/kvm/gmap.h         |  2 ++
 arch/s390/mm/gmap.c          | 46 ++----------------------------------
 4 files changed, 45 insertions(+), 46 deletions(-)

Comments

Steffen Eiden Jan. 17, 2025, 12:58 p.m. UTC | #1
On Thu, Jan 16, 2025 at 12:33:52PM +0100, Claudio Imbrenda wrote:
> Move gmap_shadow_pgt_lookup() from mm/gmap.c into kvm/gaccess.c .
> 

+1 for the replacement of BUG_ON with KVM_BUG_ON

> Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>

Reviewed-by: Steffen Eiden <seiden@linux.ibm.com>
Janosch Frank Jan. 20, 2025, 1:47 p.m. UTC | #2
On 1/16/25 12:33 PM, Claudio Imbrenda wrote:
> Move gmap_shadow_pgt_lookup() from mm/gmap.c into kvm/gaccess.c .
> 
> Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
> ---
>   arch/s390/include/asm/gmap.h |  3 +--
>   arch/s390/kvm/gaccess.c      | 40 +++++++++++++++++++++++++++++++
>   arch/s390/kvm/gmap.h         |  2 ++
>   arch/s390/mm/gmap.c          | 46 ++----------------------------------
>   4 files changed, 45 insertions(+), 46 deletions(-)

[...]

>   
> +/**
> + * gmap_shadow_pgt_lookup - find a shadow page table

The other two VSIE functions in gaccess.c have the kvm_s390_shadow 
prefix but this one is only static anyway and hence we could just drop 
the gmap_ from the name.

> + * @sg: pointer to the shadow guest address space structure
> + * @saddr: the address in the shadow aguest address space
> + * @pgt: parent gmap address of the page table to get shadowed
> + * @dat_protection: if the pgtable is marked as protected by dat
> + * @fake: pgt references contiguous guest memory block, not a pgtable
> + *
> + * Returns 0 if the shadow page table was found and -EAGAIN if the page
> + * table was not found.
> + *
> + * Called with sg->mm->mmap_lock in read.
> + */
> +static int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt,
> +				  int *dat_protection, int *fake)
> +{
> +	unsigned long *table;
> +	struct page *page;
> +	int rc;
> +
> +	spin_lock(&sg->guest_table_lock);
> +	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */

I'd be happy if you could introduce an enum for the level argument in a 
future series.

> +	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
> +		/* Shadow page tables are full pages (pte+pgste) */
> +		page = pfn_to_page(*table >> PAGE_SHIFT);
> +		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
> +		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
> +		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
> +		rc = 0;
> +	} else  {
> +		rc = -EAGAIN;
> +	}
> +	spin_unlock(&sg->guest_table_lock);
> +	return rc;
> +}
> +
>
Claudio Imbrenda Jan. 20, 2025, 1:54 p.m. UTC | #3
On Mon, 20 Jan 2025 14:47:55 +0100
Janosch Frank <frankja@linux.ibm.com> wrote:

> On 1/16/25 12:33 PM, Claudio Imbrenda wrote:
> > Move gmap_shadow_pgt_lookup() from mm/gmap.c into kvm/gaccess.c .
> > 
> > Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
> > ---
> >   arch/s390/include/asm/gmap.h |  3 +--
> >   arch/s390/kvm/gaccess.c      | 40 +++++++++++++++++++++++++++++++
> >   arch/s390/kvm/gmap.h         |  2 ++
> >   arch/s390/mm/gmap.c          | 46 ++----------------------------------
> >   4 files changed, 45 insertions(+), 46 deletions(-)  
> 
> [...]
> 
> >   
> > +/**
> > + * gmap_shadow_pgt_lookup - find a shadow page table  
> 
> The other two VSIE functions in gaccess.c have the kvm_s390_shadow 
> prefix but this one is only static anyway and hence we could just drop 
> the gmap_ from the name.

fair enough, will fix

> 
> > + * @sg: pointer to the shadow guest address space structure
> > + * @saddr: the address in the shadow aguest address space
> > + * @pgt: parent gmap address of the page table to get shadowed
> > + * @dat_protection: if the pgtable is marked as protected by dat
> > + * @fake: pgt references contiguous guest memory block, not a pgtable
> > + *
> > + * Returns 0 if the shadow page table was found and -EAGAIN if the page
> > + * table was not found.
> > + *
> > + * Called with sg->mm->mmap_lock in read.
> > + */
> > +static int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt,
> > +				  int *dat_protection, int *fake)
> > +{
> > +	unsigned long *table;
> > +	struct page *page;
> > +	int rc;
> > +
> > +	spin_lock(&sg->guest_table_lock);
> > +	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */  
> 
> I'd be happy if you could introduce an enum for the level argument in a 
> future series.

in upcoming series, the gmap table walk will be completely
rewritten, and yes, it will have enums

> 
> > +	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
> > +		/* Shadow page tables are full pages (pte+pgste) */
> > +		page = pfn_to_page(*table >> PAGE_SHIFT);
> > +		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
> > +		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
> > +		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
> > +		rc = 0;
> > +	} else  {
> > +		rc = -EAGAIN;
> > +	}
> > +	spin_unlock(&sg->guest_table_lock);
> > +	return rc;
> > +}
> > +
> >
diff mbox series

Patch

diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 99ded56c914b..ec07f99fcc7d 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -127,8 +127,6 @@  int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 		    int fake);
 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
 		    int fake);
-int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
-			   unsigned long *pgt, int *dat_protection, int *fake);
 int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
 
 void gmap_register_pte_notifier(struct gmap_notifier *);
@@ -143,6 +141,7 @@  int s390_replace_asce(struct gmap *gmap);
 void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns);
 int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
 			    unsigned long end, bool interruptible);
+unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
 
 /**
  * s390_uv_destroy_range - Destroy a range of pages in the given mm.
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 9816b0060fbe..560b5677929b 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -16,6 +16,7 @@ 
 #include <asm/gmap.h>
 #include <asm/dat-bits.h>
 #include "kvm-s390.h"
+#include "gmap.h"
 #include "gaccess.h"
 
 /*
@@ -1392,6 +1393,42 @@  static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
 	return 0;
 }
 
+/**
+ * gmap_shadow_pgt_lookup - find a shadow page table
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: the address in the shadow guest address space
+ * @pgt: parent gmap address of the page table to get shadowed
+ * @dat_protection: if the pgtable is marked as protected by dat
+ * @fake: pgt references contiguous guest memory block, not a pgtable
+ *
+ * Returns 0 if the shadow page table was found and -EAGAIN if the page
+ * table was not found.
+ *
+ * Called with sg->mm->mmap_lock in read.
+ */
+static int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr, unsigned long *pgt,
+				  int *dat_protection, int *fake)
+{
+	unsigned long *table;
+	struct page *page;
+	int rc;
+
+	spin_lock(&sg->guest_table_lock);
+	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
+	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
+		/* Shadow page tables are full pages (pte+pgste) */
+		page = pfn_to_page(*table >> PAGE_SHIFT);
+		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
+		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
+		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
+		rc = 0;
+	} else  {
+		rc = -EAGAIN;
+	}
+	spin_unlock(&sg->guest_table_lock);
+	return rc;
+}
+
 /**
  * kvm_s390_shadow_fault - handle fault on a shadow page table
  * @vcpu: virtual cpu
@@ -1415,6 +1452,9 @@  int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
 	int dat_protection, fake;
 	int rc;
 
+	if (KVM_BUG_ON(!gmap_is_shadow(sg), vcpu->kvm))
+		return -EFAULT;
+
 	mmap_read_lock(sg->mm);
 	/*
 	 * We don't want any guest-2 tables to change - so the parent
diff --git a/arch/s390/kvm/gmap.h b/arch/s390/kvm/gmap.h
index 978f541059f0..c8f031c9ea5f 100644
--- a/arch/s390/kvm/gmap.h
+++ b/arch/s390/kvm/gmap.h
@@ -10,6 +10,8 @@ 
 #ifndef ARCH_KVM_S390_GMAP_H
 #define ARCH_KVM_S390_GMAP_H
 
+#define GMAP_SHADOW_FAKE_TABLE 1ULL
+
 int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
 int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
 int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index a5c3ae18bc6f..9d4a62628e51 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -27,8 +27,6 @@ 
 #define GADDR_VALID(gaddr) ((gaddr) & 1)
 #define GMAP_SHADOW_FAKE_TABLE 1ULL
 
-static inline unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level);
-
 static struct page *gmap_alloc_crst(void)
 {
 	struct page *page;
@@ -729,8 +727,7 @@  static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
  *
  * Note: Can also be called for shadow gmaps.
  */
-static inline unsigned long *gmap_table_walk(struct gmap *gmap,
-					     unsigned long gaddr, int level)
+unsigned long *gmap_table_walk(struct gmap *gmap, unsigned long gaddr, int level)
 {
 	const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
 	unsigned long *table = gmap->table;
@@ -781,6 +778,7 @@  static inline unsigned long *gmap_table_walk(struct gmap *gmap,
 	}
 	return table;
 }
+EXPORT_SYMBOL(gmap_table_walk);
 
 /**
  * gmap_pte_op_walk - walk the gmap page table, get the page table lock
@@ -1731,46 +1729,6 @@  int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
 }
 EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
 
-/**
- * gmap_shadow_pgt_lookup - find a shadow page table
- * @sg: pointer to the shadow guest address space structure
- * @saddr: the address in the shadow aguest address space
- * @pgt: parent gmap address of the page table to get shadowed
- * @dat_protection: if the pgtable is marked as protected by dat
- * @fake: pgt references contiguous guest memory block, not a pgtable
- *
- * Returns 0 if the shadow page table was found and -EAGAIN if the page
- * table was not found.
- *
- * Called with sg->mm->mmap_lock in read.
- */
-int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
-			   unsigned long *pgt, int *dat_protection,
-			   int *fake)
-{
-	unsigned long *table;
-	struct page *page;
-	int rc;
-
-	BUG_ON(!gmap_is_shadow(sg));
-	spin_lock(&sg->guest_table_lock);
-	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
-	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
-		/* Shadow page tables are full pages (pte+pgste) */
-		page = pfn_to_page(*table >> PAGE_SHIFT);
-		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
-		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
-		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
-		rc = 0;
-	} else  {
-		rc = -EAGAIN;
-	}
-	spin_unlock(&sg->guest_table_lock);
-	return rc;
-
-}
-EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
-
 /**
  * gmap_shadow_pgt - instantiate a shadow page table
  * @sg: pointer to the shadow guest address space structure