[v5,06/11] s390/mm: Add gmap pmd invalidation and clearing

Message ID 20180706135529.88966-7-frankja@linux.ibm.com (mailing list archive)
State New, archived

Commit Message

Janosch Frank July 6, 2018, 1:55 p.m. UTC
If the host invalidates a pmd, we also have to invalidate the
corresponding gmap pmds, as well as flush them from the TLB. This is
necessary, as we don't share the pmd tables between host and guest as
we do with ptes.

The clearing part of these three new functions sets a guest pmd entry
to _SEGMENT_ENTRY_EMPTY, so the guest will fault on it and we will
re-link it.

Flushing the gmap is not necessary in the host's lazy local and csp
cases. Both purge the TLB completely.

Signed-off-by: Janosch Frank <frankja@linux.vnet.ibm.com>
Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
---
 arch/s390/include/asm/pgtable.h |   4 ++
 arch/s390/mm/gmap.c             | 114 ++++++++++++++++++++++++++++++++++++++++
 arch/s390/mm/pgtable.c          |  17 ++++--
 3 files changed, 132 insertions(+), 3 deletions(-)
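
For context: these helpers get called from the host's pmd flush routines in
arch/s390/mm/pgtable.c, which the bottom hunks of this patch hook. One level
up, the existing caller looks roughly like this (an abbreviated sketch of
pmdp_xchg_direct() as it stands in this kernel, not part of the patch):

	pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
			       pmd_t *pmdp, pmd_t new)
	{
		pmd_t old;

		preempt_disable();
		/*
		 * pmdp_flush_direct() picks pmdp_idte_local() or
		 * pmdp_idte_global(); with this patch, those now also call
		 * gmap_pmdp_idte_local()/gmap_pmdp_idte_global()/
		 * gmap_pmdp_csp() for mms that have pgstes.
		 */
		old = pmdp_flush_direct(mm, addr, pmdp);
		*pmdp = new;
		preempt_enable();
		return old;
	}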

Comments

David Hildenbrand July 6, 2018, 2:47 p.m. UTC | #1
On 06.07.2018 15:55, Janosch Frank wrote:
> [...]
> +static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
> +			    int purge)
> +{
> +	pmd_t *pmdp;
> +	struct gmap *gmap;
> +
> +	rcu_read_lock();
> +	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
> +		spin_lock(&gmap->guest_table_lock);
> +		pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
> +						   vmaddr >> PMD_SHIFT);
> +		if (pmdp) {
> +			if (purge)
> +				__pmdp_csp(pmdp);
> +			pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;

Just wondering if we actually call the notifiers on all possible paths.
Should we add a WARN_ON() in case the GMAP PMDP still has a notifier bit
set?
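
For illustration, such a check in gmap_pmdp_clear() could look like the
following (just a sketch; _SEGMENT_ENTRY_GMAP_IN is the pmd notifier bit
that a later patch in this series introduces):

	if (pmdp) {
		/*
		 * None of these paths calls a notifier, so the bit must
		 * already be clear here; a set bit would mean a missed
		 * notification.
		 */
		WARN_ON(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_IN);
		if (purge)
			__pmdp_csp(pmdp);
		pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
	}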

> +		}
> +		spin_unlock(&gmap->guest_table_lock);
> +	}
> +	rcu_read_unlock();
> +}
> [...]

Patch

diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 7c9ccd180e75..7f51e33f6456 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1119,6 +1119,10 @@ int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
 			unsigned long *oldpte, unsigned long *oldpgste);
+void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
+void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);
 
 /*
  * Certain architectures need to do special things when PTEs
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 3140f9084c8b..75d50dadd59f 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2276,6 +2276,120 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
 	*pmdp = new;
 }
 
+static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
+			    int purge)
+{
+	pmd_t *pmdp;
+	struct gmap *gmap;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+		spin_lock(&gmap->guest_table_lock);
+		pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
+						   vmaddr >> PMD_SHIFT);
+		if (pmdp) {
+			if (purge)
+				__pmdp_csp(pmdp);
+			pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+		}
+		spin_unlock(&gmap->guest_table_lock);
+	}
+	rcu_read_unlock();
+}
+
+/**
+ * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
+ *                        flushing
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
+{
+	gmap_pmdp_clear(mm, vmaddr, 0);
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
+
+/**
+ * gmap_pmdp_csp - csp all affected guest pmd entries
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
+{
+	gmap_pmdp_clear(mm, vmaddr, 1);
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
+
+/**
+ * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
+{
+	unsigned long *entry, gaddr;
+	struct gmap *gmap;
+	pmd_t *pmdp;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+		spin_lock(&gmap->guest_table_lock);
+		entry = radix_tree_delete(&gmap->host_to_guest,
+					  vmaddr >> PMD_SHIFT);
+		if (entry) {
+			pmdp = (pmd_t *)entry;
+			gaddr = __gmap_segment_gaddr(entry);
+			if (MACHINE_HAS_TLB_GUEST)
+				__pmdp_idte(gaddr, pmdp,
+					    IDTE_GUEST_ASCE,
+					    gmap->asce, IDTE_LOCAL);
+			else if (MACHINE_HAS_IDTE)
+				__pmdp_idte(gaddr, pmdp, 0, 0,
+					    IDTE_LOCAL);
+			*entry = _SEGMENT_ENTRY_EMPTY;
+		}
+		spin_unlock(&gmap->guest_table_lock);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
+
+/**
+ * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
+ * @mm: pointer to the process mm_struct
+ * @vmaddr: virtual address in the process address space
+ */
+void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
+{
+	unsigned long *entry, gaddr;
+	struct gmap *gmap;
+	pmd_t *pmdp;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+		spin_lock(&gmap->guest_table_lock);
+		entry = radix_tree_delete(&gmap->host_to_guest,
+					  vmaddr >> PMD_SHIFT);
+		if (entry) {
+			pmdp = (pmd_t *)entry;
+			gaddr = __gmap_segment_gaddr(entry);
+			if (MACHINE_HAS_TLB_GUEST)
+				__pmdp_idte(gaddr, pmdp,
+					    IDTE_GUEST_ASCE,
+					    gmap->asce, IDTE_GLOBAL);
+			else if (MACHINE_HAS_IDTE)
+				__pmdp_idte(gaddr, pmdp, 0, 0,
+					    IDTE_GLOBAL);
+			else
+				__pmdp_csp(pmdp);
+			*entry = _SEGMENT_ENTRY_EMPTY;
+		}
+		spin_unlock(&gmap->guest_table_lock);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
+
 static inline void thp_split_mm(struct mm_struct *mm)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7e1c17b1a24a..7bdb15fc5487 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -347,18 +347,27 @@ static inline void pmdp_idte_local(struct mm_struct *mm,
 			    mm->context.asce, IDTE_LOCAL);
 	else
 		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
+	if (mm_has_pgste(mm))
+		gmap_pmdp_idte_local(mm, addr);
 }
 
 static inline void pmdp_idte_global(struct mm_struct *mm,
 				    unsigned long addr, pmd_t *pmdp)
 {
-	if (MACHINE_HAS_TLB_GUEST)
+	if (MACHINE_HAS_TLB_GUEST) {
 		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 			    mm->context.asce, IDTE_GLOBAL);
-	else if (MACHINE_HAS_IDTE)
+		if (mm_has_pgste(mm))
+			gmap_pmdp_idte_global(mm, addr);
+	} else if (MACHINE_HAS_IDTE) {
 		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
-	else
+		if (mm_has_pgste(mm))
+			gmap_pmdp_idte_global(mm, addr);
+	} else {
 		__pmdp_csp(pmdp);
+		if (mm_has_pgste(mm))
+			gmap_pmdp_csp(mm, addr);
+	}
 }
 
 static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
@@ -392,6 +401,8 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 			  cpumask_of(smp_processor_id()))) {
 		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
 		mm->context.flush_mm = 1;
+		if (mm_has_pgste(mm))
+			gmap_pmdp_invalidate(mm, addr);
 	} else {
 		pmdp_idte_global(mm, addr, pmdp);
 	}