
[v7,08/12] s390/mm: Clear skeys for newly mapped huge guest pmds

Message ID 20180717124426.6240-9-frankja@linux.ibm.com (mailing list archive)
State New, archived

Commit Message

Janosch Frank July 17, 2018, 12:44 p.m. UTC
Similarly to the pte skey handling, where we set the storage key to
the default key for each newly mapped pte, we also have to do that
for huge pmds.

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
---
 arch/s390/mm/pgtable.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

Comments

David Hildenbrand July 17, 2018, 7:37 p.m. UTC | #1
On 17.07.2018 14:44, Janosch Frank wrote:
> Similarly to the pte skey handling, where we set the storage key to
> the default key for each newly mapped pte, we have to also do that for
> huge pmds.
> 
> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> ---
>  arch/s390/mm/pgtable.c | 22 ++++++++++++++++++++++
>  1 file changed, 22 insertions(+)
> 
> diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
> index 09531f779271..60fff609ab61 100644
> --- a/arch/s390/mm/pgtable.c
> +++ b/arch/s390/mm/pgtable.c
> @@ -410,12 +410,32 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
>  	return old;
>  }
>  
> +static void pmdp_clear_skeys(struct mm_struct *mm, pmd_t *pmdp, pmd_t new)
> +{
> +	unsigned long paddr = pmd_val(new) & HPAGE_MASK;
> +
> +	/*
> +	 * After a guest has used the first storage key instruction,
> +	 * we must ensure, that each newly mapped huge pmd has all
> +	 * skeys cleared before mapping it.
> +	 */
> +	if (!mm_uses_skeys(mm) ||

I would move this check to the callers. At least then it is clear why we
don't call a skey function (as said, mm_has_pgste() is really misleading).
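
Just to illustrate what I mean, something along these lines (an untested
sketch against this patch, not a prescription for the next version;
pmdp_clear_skeys() would then keep only the pmd-state checks):

static void pmdp_clear_skeys(struct mm_struct *mm, pmd_t *pmdp, pmd_t new)
{
	unsigned long paddr = pmd_val(new) & HPAGE_MASK;

	/* Only a present, newly mapped large pmd needs its skeys cleared. */
	if (!pmd_none(*pmdp) ||
	    !pmd_large(new) ||
	    (pmd_val(new) & _SEGMENT_ENTRY_INVALID))
		return;

	__storage_key_init_range(paddr, paddr + HPAGE_SIZE - 1);
}

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t new)
{
	pmd_t old;

	preempt_disable();
	/* The reason for clearing skeys is now visible at the call site. */
	if (mm_uses_skeys(mm))
		pmdp_clear_skeys(mm, pmdp, new);
	old = pmdp_flush_direct(mm, addr, pmdp);
	*pmdp = new;
	preempt_enable();
	return old;
}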

> +	    !pmd_none(*pmdp) ||
> +	    !pmd_large(new) ||
> +	    (pmd_val(new) & _SEGMENT_ENTRY_INVALID))
> +		return;
> +
> +	__storage_key_init_range(paddr, paddr + HPAGE_SIZE - 1);
> +}
> +
>  pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
>  		       pmd_t *pmdp, pmd_t new)
>  {
>  	pmd_t old;
>  
>  	preempt_disable();
> +	if (mm_has_pgste(mm))
> +		pmdp_clear_skeys(mm, pmdp, new);
>  	old = pmdp_flush_direct(mm, addr, pmdp);
>  	*pmdp = new;
>  	preempt_enable();
> @@ -429,6 +449,8 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
>  	pmd_t old;
>  
>  	preempt_disable();
> +	if (mm_has_pgste(mm))
> +		pmdp_clear_skeys(mm, pmdp, new);
>  	old = pmdp_flush_lazy(mm, addr, pmdp);
>  	*pmdp = new;
>  	preempt_enable();
> 

Hoping Martin can have a look

Acked-by: David Hildenbrand <david@redhat.com>
Janosch Frank July 18, 2018, 7:20 a.m. UTC | #2
On Tue, 17 Jul 2018 21:37:23 +0200
David Hildenbrand <david@redhat.com> wrote:

> On 17.07.2018 14:44, Janosch Frank wrote:
> > Similarly to the pte skey handling, where we set the storage key to
> > the default key for each newly mapped pte, we have to also do that
> > for huge pmds.
> > 
> > Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> > ---
> >  arch/s390/mm/pgtable.c | 22 ++++++++++++++++++++++
> >  1 file changed, 22 insertions(+)
> > 
> > diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
> > index 09531f779271..60fff609ab61 100644
> > --- a/arch/s390/mm/pgtable.c
> > +++ b/arch/s390/mm/pgtable.c
> > @@ -410,12 +410,32 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
> >  	return old;
> >  }
> >  
> > +static void pmdp_clear_skeys(struct mm_struct *mm, pmd_t *pmdp, pmd_t new)
> > +{
> > +	unsigned long paddr = pmd_val(new) & HPAGE_MASK;
> > +
> > +	/*
> > +	 * After a guest has used the first storage key instruction,
> > +	 * we must ensure, that each newly mapped huge pmd has all
> > +	 * skeys cleared before mapping it.
> > +	 */
> > +	if (!mm_uses_skeys(mm) ||  
> 
> I would move this check to the callers. At least then it is clear why
> we don't call a skey function (as said, mm_has_pgste() is really
> misleading).

Now that pmdp_notify is gone from there I can do that.

> 
> > +	    !pmd_none(*pmdp) ||
> > +	    !pmd_large(new) ||
> > +	    (pmd_val(new) & _SEGMENT_ENTRY_INVALID))
> > +		return;
> > +
> > +	__storage_key_init_range(paddr, paddr + HPAGE_SIZE - 1);
> > +}
> > +
> >  pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
> >  		       pmd_t *pmdp, pmd_t new)
> >  {
> >  	pmd_t old;
> >  
> >  	preempt_disable();
> > +	if (mm_has_pgste(mm))
> > +		pmdp_clear_skeys(mm, pmdp, new);
> >  	old = pmdp_flush_direct(mm, addr, pmdp);
> >  	*pmdp = new;
> >  	preempt_enable();
> > @@ -429,6 +449,8 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
> >  	pmd_t old;
> >  
> >  	preempt_disable();
> > +	if (mm_has_pgste(mm))
> > +		pmdp_clear_skeys(mm, pmdp, new);

The pgste check here will be replaced with mm_uses_skeys() as it is
bound to gmaps anyway.
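
Roughly like this, in diff terms (just a sketch of the idea, the next
version may still end up looking different):

-	if (mm_has_pgste(mm))
+	if (mm_uses_skeys(mm))
 		pmdp_clear_skeys(mm, pmdp, new);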

> >  	old = pmdp_flush_lazy(mm, addr, pmdp);
> >  	*pmdp = new;
> >  	preempt_enable();
> >   
> 
> Hoping Martin can have a look
> 
> Acked-by: David Hildenbrand <david@redhat.com>
>
Martin Schwidefsky July 19, 2018, 11:56 a.m. UTC | #3
On Tue, 17 Jul 2018 21:37:23 +0200
David Hildenbrand <david@redhat.com> wrote:

> On 17.07.2018 14:44, Janosch Frank wrote:
> > Similarly to the pte skey handling, where we set the storage key to
> > the default key for each newly mapped pte, we have to also do that for
> > huge pmds.
> > 
> > Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> > ---
> >  arch/s390/mm/pgtable.c | 22 ++++++++++++++++++++++
> >  1 file changed, 22 insertions(+)
> > 
> > diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
> > index 09531f779271..60fff609ab61 100644
> > --- a/arch/s390/mm/pgtable.c
> > +++ b/arch/s390/mm/pgtable.c
> > @@ -410,12 +410,32 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
> >  	return old;
> >  }
> >  
> > +static void pmdp_clear_skeys(struct mm_struct *mm, pmd_t *pmdp, pmd_t new)
> > +{
> > +	unsigned long paddr = pmd_val(new) & HPAGE_MASK;
> > +
> > +	/*
> > +	 * After a guest has used the first storage key instruction,
> > +	 * we must ensure, that each newly mapped huge pmd has all
> > +	 * skeys cleared before mapping it.
> > +	 */
> > +	if (!mm_uses_skeys(mm) ||  
> 
> I would move this check to the callers. At least then it is clear why we
> don't call a skey function (as said, mm_has_pgste() is really misleading).
> 
> > +	    !pmd_none(*pmdp) ||
> > +	    !pmd_large(new) ||
> > +	    (pmd_val(new) & _SEGMENT_ENTRY_INVALID))
> > +		return;
> > +
> > +	__storage_key_init_range(paddr, paddr + HPAGE_SIZE - 1);
> > +}
> > +
> >  pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
> >  		       pmd_t *pmdp, pmd_t new)
> >  {
> >  	pmd_t old;
> >  
> >  	preempt_disable();
> > +	if (mm_has_pgste(mm))
> > +		pmdp_clear_skeys(mm, pmdp, new);
> >  	old = pmdp_flush_direct(mm, addr, pmdp);
> >  	*pmdp = new;
> >  	preempt_enable();
> > @@ -429,6 +449,8 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
> >  	pmd_t old;
> >  
> >  	preempt_disable();
> > +	if (mm_has_pgste(mm))
> > +		pmdp_clear_skeys(mm, pmdp, new);
> >  	old = pmdp_flush_lazy(mm, addr, pmdp);
> >  	*pmdp = new;
> >  	preempt_enable();
> >   
> 
> Hoping Martin can have a look
> 
> Acked-by: David Hildenbrand <david@redhat.com>
 
Talked to Janosch about this, stay tuned for the next version.

Patch

diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 09531f779271..60fff609ab61 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -410,12 +410,32 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 	return old;
 }
 
+static void pmdp_clear_skeys(struct mm_struct *mm, pmd_t *pmdp, pmd_t new)
+{
+	unsigned long paddr = pmd_val(new) & HPAGE_MASK;
+
+	/*
+	 * After a guest has used the first storage key instruction,
+	 * we must ensure that each newly mapped huge pmd has all
+	 * skeys cleared before mapping it.
+	 */
+	if (!mm_uses_skeys(mm) ||
+	    !pmd_none(*pmdp) ||
+	    !pmd_large(new) ||
+	    (pmd_val(new) & _SEGMENT_ENTRY_INVALID))
+		return;
+
+	__storage_key_init_range(paddr, paddr + HPAGE_SIZE - 1);
+}
+
 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 		       pmd_t *pmdp, pmd_t new)
 {
 	pmd_t old;
 
 	preempt_disable();
+	if (mm_has_pgste(mm))
+		pmdp_clear_skeys(mm, pmdp, new);
 	old = pmdp_flush_direct(mm, addr, pmdp);
 	*pmdp = new;
 	preempt_enable();
@@ -429,6 +449,8 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 	pmd_t old;
 
 	preempt_disable();
+	if (mm_has_pgste(mm))
+		pmdp_clear_skeys(mm, pmdp, new);
 	old = pmdp_flush_lazy(mm, addr, pmdp);
 	*pmdp = new;
 	preempt_enable();