| Message ID | 20180713063702.54628-13-frankja@linux.ibm.com (mailing list archive) |
|---|---|
| State | New, archived |
On 13.07.2018 08:37, Janosch Frank wrote:
> Let's allow huge pmd linking when enabled through the
> KVM_CAP_S390_HPAGE_1M capability. Also we can now restrict gmap
> invalidation and notification to the cases where the capability has
> been activated and save some cycles when that's not the case.
>
> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
[...]

You should probably change the order of the last two patches, so you
really only expose the capability if support for huge pages is there.
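For readers following the thread: the "exposure" David refers to is what userspace sees via KVM_CHECK_EXTENSION, so the kernel must not advertise the capability before the gmap side can actually link huge pmds. A minimal userspace sketch of the probe-then-enable flow; the capability name is from this series, everything else is illustrative and untested:

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_HPAGE_1M;

	/*
	 * Only enable what the kernel actually advertises, and do it
	 * before any vCPU is created.
	 */
	if (ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M) > 0 &&
	    ioctl(vm, KVM_ENABLE_CAP, &cap) == 0)
		printf("1M huge page backing enabled for this VM\n");
	return 0;
}
```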
On 13.07.2018 11:00, David Hildenbrand wrote:
> On 13.07.2018 08:37, Janosch Frank wrote:
>> Let's allow huge pmd linking when enabled through the
>> KVM_CAP_S390_HPAGE_1M capability. Also we can now restrict gmap
>> invalidation and notification to the cases where the capability has
>> been activated and save some cycles when that's not the case.
[...]
>
> You should probably change the order of the last two patches, so you
> really only expose the capability if support for huge pages is there.

Which would mean I don't have mm->context.allow_gmap_hpage_1m in this
one and compilation breaks...
On 13.07.2018 12:18, Janosch Frank wrote:
> On 13.07.2018 11:00, David Hildenbrand wrote:
>> On 13.07.2018 08:37, Janosch Frank wrote:
>>> Let's allow huge pmd linking when enabled through the
>>> KVM_CAP_S390_HPAGE_1M capability. Also we can now restrict gmap
>>> invalidation and notification to the cases where the capability has
>>> been activated and save some cycles when that's not the case.
[...]
>>
>> You should probably change the order of the last two patches, so you
>> really only expose the capability if support for huge pages is there.
>
> Which would mean I don't have mm->context.allow_gmap_hpage_1m in this
> one and compilation breaks...

Well you can easily move introduction also to this patch.
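Concretely, "moving the introduction" would amount to squashing the flag's declaration into this patch, so that the `mm->context.allow_gmap_hpage_1m` uses compile. A rough sketch of that one-liner; the field name matches the series, but the surrounding struct is abridged and purely illustrative:

```c
/*
 * Sketch only: in the series this bit lives in s390's mm_context_t
 * (arch/s390/include/asm/mmu.h). The struct is abridged here to the
 * one relevant field.
 */
typedef struct {
	/* ... existing s390 mm context fields elided ... */

	/* Set once KVM_CAP_S390_HPAGE_1M has been enabled for this mm. */
	unsigned int allow_gmap_hpage_1m : 1;
} mm_context_t;
```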
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 8b1a293b00d3..1399392d2fbd 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2,8 +2,10 @@
 /*
  * KVM guest address space mapping code
  *
- * Copyright IBM Corp. 2007, 2016
+ * Copyright IBM Corp. 2007, 2016, 2018
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *            David Hildenbrand <david@redhat.com>
+ *            Janosch Frank <frankja@linux.vnet.ibm.com>
  */
 
 #include <linux/kernel.h>
@@ -589,8 +591,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 		return -EFAULT;
 	pmd = pmd_offset(pud, vmaddr);
 	VM_BUG_ON(pmd_none(*pmd));
-	/* large pmds cannot yet be handled */
-	if (pmd_large(*pmd))
+	/* Are we allowed to use huge pages? */
+	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
 		return -EFAULT;
 	/* Link gmap segment table entry location to page table. */
 	rc = radix_tree_preload(GFP_KERNEL);
@@ -1631,6 +1633,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
 	unsigned long limit;
 	int rc;
 
+	BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
 	BUG_ON(gmap_is_shadow(parent));
 	spin_lock(&parent->shadow_lock);
 	sg = gmap_find_shadow(parent, asce, edat_level);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 161e08437681..1275f73b5c39 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -348,7 +348,7 @@ static inline void pmdp_idte_local(struct mm_struct *mm,
 			    mm->context.asce, IDTE_LOCAL);
 	else
 		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
-	if (mm_has_pgste(mm))
+	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 		gmap_pmdp_idte_local(mm, addr);
 }
 
@@ -358,15 +358,15 @@ static inline void pmdp_idte_global(struct mm_struct *mm,
 	if (MACHINE_HAS_TLB_GUEST) {
 		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 			    mm->context.asce, IDTE_GLOBAL);
-		if (mm_has_pgste(mm))
+		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 			gmap_pmdp_idte_global(mm, addr);
 	} else if (MACHINE_HAS_IDTE) {
 		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
-		if (mm_has_pgste(mm))
+		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 			gmap_pmdp_idte_global(mm, addr);
 	} else {
 		__pmdp_csp(pmdp);
-		if (mm_has_pgste(mm))
+		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 			gmap_pmdp_csp(mm, addr);
 	}
 }
@@ -435,7 +435,7 @@ pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 	pmd_t old;
 
 	preempt_disable();
-	if (mm_has_pgste(mm)) {
+	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) {
 		pmdp_clear_skeys(mm, pmdp, new);
 		pmdp_notify(mm, addr);
 	}
@@ -452,7 +452,7 @@ pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 	pmd_t old;
 
 	preempt_disable();
-	if (mm_has_pgste(mm)) {
+	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) {
 		pmdp_clear_skeys(mm, pmdp, new);
 		pmdp_notify(mm, addr);
 	}
Let's allow huge pmd linking when enabled through the
KVM_CAP_S390_HPAGE_1M capability. Also we can now restrict gmap
invalidation and notification to the cases where the capability has
been activated and save some cycles when that's not the case.

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
---
 arch/s390/mm/gmap.c    |  9 ++++++---
 arch/s390/mm/pgtable.c | 12 ++++++------
 2 files changed, 12 insertions(+), 9 deletions(-)
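As an aside, the guard repeated throughout the pgtable.c hunks can be read as a single predicate. The helper below is hypothetical (it is not part of this series) and is shown only to make the pattern explicit: gmap invalidation and notification for huge pmds are needed only when the mm has pgstes *and* userspace has enabled KVM_CAP_S390_HPAGE_1M.

```c
/* Hypothetical helper, not in the series; illustrative only. */
static inline bool mm_gmap_hpage_active(struct mm_struct *mm)
{
	return mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m;
}
```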