[v2] s390/mm: Add huge page dirty sync support

Message ID 20180713132056.8487-1-frankja@linux.ibm.com (mailing list archive)
State New, archived

Commit Message

Janosch Frank July 13, 2018, 1:20 p.m. UTC
To do dirty logging with huge pages, we protect huge pmds in the
gmap. When they are written to, we unprotect them and mark them dirty.

We introduce the function gmap_test_and_clear_dirty_segment which
handles dirty sync for huge pages.

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
---

So, that's a shot from the hip, I'll have to review this on Monday,
but here's what David wanted.

---
 arch/s390/include/asm/gmap.h    |   3 +
 arch/s390/include/asm/pgtable.h |   3 +-
 arch/s390/kvm/kvm-s390.c        |  25 +++++---
 arch/s390/mm/gmap.c             | 125 +++++++++++++++++++++++++++++++++++++++-
 arch/s390/mm/pgtable.c          |  32 +---------
 5 files changed, 148 insertions(+), 40 deletions(-)

Comments

David Hildenbrand July 16, 2018, 12:02 p.m. UTC | #1
On 13.07.2018 15:20, Janosch Frank wrote:
> To do dirty logging with huge pages, we protect huge pmds in the
> gmap. When they are written to, we unprotect them and mark them dirty.
> 
> We introduce the function gmap_test_and_clear_dirty_segment which
> handles dirty sync for huge pages.
> 
> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> ---
> 
> So, that's a shot from the hip, I'll have to review this on Monday,
> but here's what David wanted.

Looks cleaner to me!

> 
> ---
>  arch/s390/include/asm/gmap.h    |   3 +
>  arch/s390/include/asm/pgtable.h |   3 +-
>  arch/s390/kvm/kvm-s390.c        |  25 +++++---
>  arch/s390/mm/gmap.c             | 125 +++++++++++++++++++++++++++++++++++++++-
>  arch/s390/mm/pgtable.c          |  32 +---------
>  5 files changed, 148 insertions(+), 40 deletions(-)
> 
> diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
> index 276268b48aff..f923ed27ac6e 100644
> --- a/arch/s390/include/asm/gmap.h
> +++ b/arch/s390/include/asm/gmap.h
> @@ -15,6 +15,7 @@
>  
>  /* Status bits only for huge segment entries */
>  #define _SEGMENT_ENTRY_GMAP_IN		0x8000	/* invalidation notify bit */
> +#define _SEGMENT_ENTRY_GMAP_UC		0x4000	/* user dirty (migration) */

Strictly speaking, this is no longer a "user dirty" bit; it is simply a
SW dirty bit (we will never track user-related state with this bit).
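Maybe just reword the comment accordingly, e.g.:

#define _SEGMENT_ENTRY_GMAP_UC		0x4000	/* SW dirty bit (migration) */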

>  
>  /**
>   * struct gmap_struct - guest address space
> @@ -139,4 +140,6 @@ void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *,
>  int gmap_mprotect_notify(struct gmap *, unsigned long start,
>  			 unsigned long len, int prot);
>  
> +void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
> +			     unsigned long gaddr, unsigned long vmaddr);
>  #endif /* _ASM_S390_GMAP_H */
> diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
> index 7f51e33f6456..bdf3c48ea301 100644
> --- a/arch/s390/include/asm/pgtable.h
> +++ b/arch/s390/include/asm/pgtable.h
> @@ -1104,7 +1104,8 @@ int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
>  		    pte_t *sptep, pte_t *tptep, pte_t pte);
>  void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
>  
> -bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
> +bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address,
> +				pte_t *ptep);
>  int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
>  			  unsigned char key, bool nq);
>  int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
> diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
> index 3b7a5151b6a5..ff090a2d0bbd 100644
> --- a/arch/s390/kvm/kvm-s390.c
> +++ b/arch/s390/kvm/kvm-s390.c
> @@ -511,19 +511,30 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
>  }
>  
>  static void kvm_s390_sync_dirty_log(struct kvm *kvm,
> -					struct kvm_memory_slot *memslot)
> +				    struct kvm_memory_slot *memslot)
>  {
> +	int i;
>  	gfn_t cur_gfn, last_gfn;
> -	unsigned long address;
> +	unsigned long gaddr, vmaddr;
> +	unsigned long bitmap[4];

DECLARE_BITMAP could be used.
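Untested, but something like this should expand to the same four
unsigned longs (with _PAGE_ENTRIES being 256):

	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);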

>  	struct gmap *gmap = kvm->arch.gmap;
>  
> -	/* Loop over all guest pages */
> +	/* Loop over all guest segments */
> +	cur_gfn = memslot->base_gfn;
>  	last_gfn = memslot->base_gfn + memslot->npages;
> -	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
> -		address = gfn_to_hva_memslot(memslot, cur_gfn);
> +	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
> +		gaddr = gfn_to_gpa(cur_gfn);
> +		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
> +		if (kvm_is_error_hva(vmaddr))
> +			continue;
> +
> +		bitmap_zero(bitmap, _PAGE_ENTRIES);
> +		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
> +		for (i = 0; i < _PAGE_ENTRIES; i++) {
> +			if (test_bit(i, bitmap))
> +				mark_page_dirty(kvm, cur_gfn + i);

Wonder if find_first_bit and friends would pay off here.
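E.g. (untested):

		for_each_set_bit(i, bitmap, _PAGE_ENTRIES)
			mark_page_dirty(kvm, cur_gfn + i);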

> +		}
>  
> -		if (test_and_clear_guest_dirty(gmap->mm, address))
> -			mark_page_dirty(kvm, cur_gfn);
>  		if (fatal_signal_pending(current))
>  			return;
>  		cond_resched();
> diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
> index 631584b39a5e..bb199eec79bb 100644
> --- a/arch/s390/mm/gmap.c
> +++ b/arch/s390/mm/gmap.c
> @@ -15,6 +15,7 @@
>  #include <linux/swapops.h>
>  #include <linux/ksm.h>
>  #include <linux/mman.h>
> +#include <linux/hugetlb.h>
>  
>  #include <asm/pgtable.h>
>  #include <asm/pgalloc.h>
> @@ -521,6 +522,9 @@ void gmap_unlink(struct mm_struct *mm, unsigned long *table,
>  	rcu_read_unlock();
>  }
>  
> +static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
> +			   unsigned long gaddr);
> +
>  /**
>   * gmap_link - set up shadow page tables to connect a host to a guest address
>   * @gmap: pointer to guest mapping meta data structure
> @@ -541,6 +545,7 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
>  	p4d_t *p4d;
>  	pud_t *pud;
>  	pmd_t *pmd;
> +	u64 unprot;
>  	int rc;
>  
>  	BUG_ON(gmap_is_shadow(gmap));
> @@ -598,12 +603,19 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
>  				       vmaddr >> PMD_SHIFT, table);
>  		if (!rc) {
>  			if (pmd_large(*pmd)) {
> -				*table = pmd_val(*pmd) &
> -					_SEGMENT_ENTRY_HARDWARE_BITS_LARGE;
> +				*table = (pmd_val(*pmd) &
> +					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
> +					| _SEGMENT_ENTRY_GMAP_UC;
>  			} else
>  				*table = pmd_val(*pmd) &
>  					_SEGMENT_ENTRY_HARDWARE_BITS;
>  		}
> +	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
> +		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
> +		unprot = *table & _SEGMENT_ENTRY_HARDWARE_BITS_LARGE;

As I said, this looks somewhat dangerous. It is okay to clear notifiers
(has to be done) but if we ever add another bit, this will silently get
erased here. Not sure how this could be done better. My take would be to
always let gmap_pmdp_xchg() clear the notifier bits in the new value and
not remove any bit except protection at this point.
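Roughly what I have in mind (untested, based on your gmap_pmdp_xchg()
further down):

static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
			   unsigned long gaddr)
{
	gaddr &= HPAGE_MASK;
	pmdp_notify_gmap(gmap, pmdp, gaddr);
	/* always drop the notifier bit from the new value right here ... */
	pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, gmap->asce,
			    IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	*pmdp = new;
}

... so that __gmap_link() only has to clear the protection bit.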

> +		unprot &= ~_SEGMENT_ENTRY_PROTECT;
> +		unprot |= _SEGMENT_ENTRY_GMAP_UC;
> +		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
>  	}
>  	spin_unlock(&gmap->guest_table_lock);
>  	spin_unlock(ptl);
> @@ -931,11 +943,23 @@ static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
>  {
>  	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
>  	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
> +	pmd_t new = *pmdp;
>  
>  	/* Fixup needed */
>  	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
>  		return -EAGAIN;
>  
> +	if (prot == PROT_NONE && !pmd_i) {
> +		pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
> +		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
> +	}
> +
> +	if (prot == PROT_READ && !pmd_p) {
> +		pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
> +		pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
> +		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
> +	}
> +
>  	if (bits & GMAP_NOTIFY_MPROT)
>  		pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
>  
> @@ -2222,6 +2246,13 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
>  }
>  EXPORT_SYMBOL_GPL(ptep_notify);
>  
> +static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
> +			     unsigned long gaddr)
> +{
> +	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
> +	gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
> +}
> +
>  /**
>   * pmdp_notify - call all invalidation callbacks for a specific pmd
>   * @mm: pointer to the process mm_struct
> @@ -2252,6 +2283,31 @@ void pmdp_notify(struct mm_struct *mm, unsigned long vmaddr)
>  }
>  EXPORT_SYMBOL_GPL(pmdp_notify);
>  
> +/**
> + * gmap_pmdp_xchg - exchange a gmap pmd with another
> + * @gmap: pointer to the guest address space structure
> + * @pmdp: pointer to the pmd entry
> + * @new: replacement entry
> + * @gaddr: the affected guest address
> + *
> + * This function is assumed to be called with the guest_table_lock
> + * held.
> + */
> +static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
> +			   unsigned long gaddr)
> +{
> +	gaddr &= HPAGE_MASK;
> +	pmdp_notify_gmap(gmap, pmdp, gaddr);
> +	if (MACHINE_HAS_TLB_GUEST)
> +		__pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
> +			    IDTE_GLOBAL);
> +	else if (MACHINE_HAS_IDTE)
> +		__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
> +	else
> +		__pmdp_csp(pmdp);
> +	*pmdp = new;
> +}
> +
>  static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
>  			    int purge)
>  {
> @@ -2372,6 +2428,71 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
>  }
>  EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
>  
> +/**
> + * gmap_test_and_clear_dirty_segment - test and reset segment dirty status
> + * @gmap: pointer to guest address space
> + * @pmdp: pointer to the pmd to be tested
> + * @gaddr: virtual address in the guest address space
> + *
> + * This function is assumed to be called with the guest_table_lock
> + * held.
> + */
> +bool gmap_test_and_clear_dirty_segment(struct gmap *gmap, pmd_t *pmdp,
> +				       unsigned long gaddr)

gmap_pmdp_test_and_clear_dirty() /
gmap_test_and_clear_dirty_pmdp() ?

> +{
> +	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
> +		return false;
> +
> +	/* Already protected memory, which did not change is clean */
> +	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
> +	    !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
> +		return false;
> +
> +	/* Clear UC indication and reset protection */
> +	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
> +	gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
> +	return true;
> +}
> +
> +/**
> + * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
> + * @gmap: pointer to guest address space
> + * @bitmap: dirty bitmap for this pmd
> + * @gaddr: virtual address in the guest address space
> + * @vmaddr: virtual address in the host address space
> + *
> + * This function is assumed to be called with the guest_table_lock
> + * held.
> + */
> +void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
> +			     unsigned long gaddr, unsigned long vmaddr)
> +{
> +	int i = 0;
> +	pmd_t *pmdp;
> +	pte_t *ptep;
> +	spinlock_t *ptl;
> +
> +	pmdp = gmap_pmd_op_walk(gmap, gaddr);
> +	if (!pmdp)
> +		return;
> +
> +	if (pmd_large(*pmdp)) {
> +		if (gmap_test_and_clear_dirty_segment(gmap, pmdp, gaddr))
> +			bitmap_fill(bitmap, _PAGE_ENTRIES);
> +	} else {
> +		for (; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {

just initialize i to 0 here. Makes this easier to read at least for me.
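I.e. something like:

		for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {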

> +			ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
> +			if (!ptep)
> +				continue;
> +			if (test_and_clear_guest_dirty(gmap->mm, vmaddr, ptep))
> +				set_bit(i, bitmap);
> +			spin_unlock(ptl);
> +		}
> +	}
> +	gmap_pmd_op_end(gmap, pmdp);
> +}
> +EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
> +
>  static inline void thp_split_mm(struct mm_struct *mm)
>  {
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
> index 7bdb15fc5487..580caa5be956 100644
> --- a/arch/s390/mm/pgtable.c
> +++ b/arch/s390/mm/pgtable.c
> @@ -708,40 +708,14 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
>  /*
>   * Test and reset if a guest page is dirty
>   */
> -bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
> +bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr,
> +				pte_t *ptep)


I think we should even go ahead and rename this to something like ptep_...

e.g. ptep_test_and_clear_uc()
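... with a prototype along the lines of:

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep);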

>  {
> -	spinlock_t *ptl;
> -	pgd_t *pgd;
> -	p4d_t *p4d;
> -	pud_t *pud;
> -	pmd_t *pmd;
>  	pgste_t pgste;
> -	pte_t *ptep;
>  	pte_t pte;
>  	bool dirty;
>  	int nodat;
>  
> -	pgd = pgd_offset(mm, addr);
> -	p4d = p4d_alloc(mm, pgd, addr);
> -	if (!p4d)
> -		return false;
> -	pud = pud_alloc(mm, p4d, addr);
> -	if (!pud)
> -		return false;
> -	pmd = pmd_alloc(mm, pud, addr);
> -	if (!pmd)
> -		return false;
> -	/* We can't run guests backed by huge pages, but userspace can
> -	 * still set them up and then try to migrate them without any
> -	 * migration support.
> -	 */
> -	if (pmd_large(*pmd))
> -		return true;
> -
> -	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
> -	if (unlikely(!ptep))
> -		return false;
> -
>  	pgste = pgste_get_lock(ptep);
>  	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
>  	pgste_val(pgste) &= ~PGSTE_UC_BIT;
> @@ -757,8 +731,6 @@ bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
>  		*ptep = pte;
>  	}
>  	pgste_set_unlock(ptep, pgste);
> -
> -	spin_unlock(ptl);
>  	return dirty;
>  }
>  EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);
>
Janosch Frank July 16, 2018, 1:04 p.m. UTC | #2
On 16.07.2018 14:02, David Hildenbrand wrote:
> On 13.07.2018 15:20, Janosch Frank wrote:
>> To do dirty logging with huge pages, we protect huge pmds in the
>> gmap. When they are written to, we unprotect them and mark them dirty.
>>
>> We introduce the function gmap_test_and_clear_dirty_segment which
>> handles dirty sync for huge pages.
>>
>> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
>> ---
>>
>> So, that's a shot from the hip, I'll have to review this on Monday,
>> but here's what David wanted.
> 
> Looks cleaner to me!

>>  	BUG_ON(gmap_is_shadow(gmap));
>> @@ -598,12 +603,19 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
>>  				       vmaddr >> PMD_SHIFT, table);
>>  		if (!rc) {
>>  			if (pmd_large(*pmd)) {
>> -				*table = pmd_val(*pmd) &
>> -					_SEGMENT_ENTRY_HARDWARE_BITS_LARGE;
>> +				*table = (pmd_val(*pmd) &
>> +					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
>> +					| _SEGMENT_ENTRY_GMAP_UC;
>>  			} else
>>  				*table = pmd_val(*pmd) &
>>  					_SEGMENT_ENTRY_HARDWARE_BITS;
>>  		}
>> +	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
>> +		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
>> +		unprot = *table & _SEGMENT_ENTRY_HARDWARE_BITS_LARGE;
> 
> As I said, this looks somewhat dangerous. It is okay to clear notifiers
> (has to be done) but if we ever add another bit, this will silently get
> erased here. Not sure how this could be done better. My take would be to
> always let gmap_pmdp_xchg() clear the notifier bits in the new value and
> not remove any bit except protection at this point.

Alright, this will also solve the problem that we currently do not
remove notifiers on gmap_protect_pmd...

For split and VSIE I will add definitions for status and notifier bits,
for easier masking, something like:
#define _SEGMENT_ENTRY_NOTIFY_BITS (_SEGMENT_ENTRY_GMAP_IN | \
				    _SEGMENT_ENTRY_GMAP_VSIE)

#define _SEGMENT_ENTRY_STATUS_BITS (_SEGMENT_ENTRY_GMAP_UC | \
				    _SEGMENT_ENTRY_SPLIT)
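
gmap_pmdp_xchg() (and friends) could then strip all notifier bits
generically, something like:

	pmd_val(new) &= ~_SEGMENT_ENTRY_NOTIFY_BITS;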


I applied all of your other changes, except for the find_first_bit
stuff, I'll worry about that later.
David Hildenbrand July 16, 2018, 1:42 p.m. UTC | #3
On 16.07.2018 15:04, Janosch Frank wrote:
> On 16.07.2018 14:02, David Hildenbrand wrote:
>> On 13.07.2018 15:20, Janosch Frank wrote:
>>> To do dirty logging with huge pages, we protect huge pmds in the
>>> gmap. When they are written to, we unprotect them and mark them dirty.
>>>
>>> We introduce the function gmap_test_and_clear_dirty_segment which
>>> handles dirty sync for huge pages.
>>>
>>> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
>>> ---
>>>
>>> So, that's a shot from the hip, I'll have to review this on Monday,
>>> but here's what David wanted.
>>
>> Looks cleaner to me!
> 
>>>  	BUG_ON(gmap_is_shadow(gmap));
>>> @@ -598,12 +603,19 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
>>>  				       vmaddr >> PMD_SHIFT, table);
>>>  		if (!rc) {
>>>  			if (pmd_large(*pmd)) {
>>> -				*table = pmd_val(*pmd) &
>>> -					_SEGMENT_ENTRY_HARDWARE_BITS_LARGE;
>>> +				*table = (pmd_val(*pmd) &
>>> +					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
>>> +					| _SEGMENT_ENTRY_GMAP_UC;
>>>  			} else
>>>  				*table = pmd_val(*pmd) &
>>>  					_SEGMENT_ENTRY_HARDWARE_BITS;
>>>  		}
>>> +	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
>>> +		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
>>> +		unprot = *table & _SEGMENT_ENTRY_HARDWARE_BITS_LARGE;
>>
>> As I said, this looks somewhat dangerous. It is okay to clear notifiers
>> (has to be done) but if we ever add another bit, this will silently get
>> erased here. Not sure how this could be done better. My take would be to
>> always let gmap_pmdp_xchg() clear the notifier bits in the new value and
>> not remove any bit except protection at this point.
> 
> Alright, this will also solve, that we do not remove notifiers on
> gmap_protect_pmd right now...
> 
> For split and VSIE I will add definitions for status and notifier bits,
> for easier masking, something like:
> #define _SEGMENT_ENTRY_NOTIFY_BITS (_SEGMENT_ENTRY_GMAP_IN | \
> 				    _SEGMENT_ENTRY_GMAP_VSIE)
> 
> #define _SEGMENT_ENTRY_STATUS_BITS (_SEGMENT_ENTRY_GMAP_UC | \
> 				    _SEGMENT_ENTRY_SPLIT)

Makes sense, I wonder if we should put GMAP somewhere in there ...

> 
> 
> I applied all of your other changes, except for the find_first_bit
> stuff, I'll worry about that later.
>

Patch

diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 276268b48aff..f923ed27ac6e 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -15,6 +15,7 @@ 
 
 /* Status bits only for huge segment entries */
 #define _SEGMENT_ENTRY_GMAP_IN		0x8000	/* invalidation notify bit */
+#define _SEGMENT_ENTRY_GMAP_UC		0x4000	/* user dirty (migration) */
 
 /**
  * struct gmap_struct - guest address space
@@ -139,4 +140,6 @@  void gmap_pte_notify(struct mm_struct *, unsigned long addr, pte_t *,
 int gmap_mprotect_notify(struct gmap *, unsigned long start,
 			 unsigned long len, int prot);
 
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
+			     unsigned long gaddr, unsigned long vmaddr);
 #endif /* _ASM_S390_GMAP_H */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 7f51e33f6456..bdf3c48ea301 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1104,7 +1104,8 @@  int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
 		    pte_t *sptep, pte_t *tptep, pte_t pte);
 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
 
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
+bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address,
+				pte_t *ptep);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned char key, bool nq);
 int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 3b7a5151b6a5..ff090a2d0bbd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -511,19 +511,30 @@  int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 }
 
 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
-					struct kvm_memory_slot *memslot)
+				    struct kvm_memory_slot *memslot)
 {
+	int i;
 	gfn_t cur_gfn, last_gfn;
-	unsigned long address;
+	unsigned long gaddr, vmaddr;
+	unsigned long bitmap[4];
 	struct gmap *gmap = kvm->arch.gmap;
 
-	/* Loop over all guest pages */
+	/* Loop over all guest segments */
+	cur_gfn = memslot->base_gfn;
 	last_gfn = memslot->base_gfn + memslot->npages;
-	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
-		address = gfn_to_hva_memslot(memslot, cur_gfn);
+	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
+		gaddr = gfn_to_gpa(cur_gfn);
+		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
+		if (kvm_is_error_hva(vmaddr))
+			continue;
+
+		bitmap_zero(bitmap, _PAGE_ENTRIES);
+		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
+		for (i = 0; i < _PAGE_ENTRIES; i++) {
+			if (test_bit(i, bitmap))
+				mark_page_dirty(kvm, cur_gfn + i);
+		}
 
-		if (test_and_clear_guest_dirty(gmap->mm, address))
-			mark_page_dirty(kvm, cur_gfn);
 		if (fatal_signal_pending(current))
 			return;
 		cond_resched();
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 631584b39a5e..bb199eec79bb 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -15,6 +15,7 @@ 
 #include <linux/swapops.h>
 #include <linux/ksm.h>
 #include <linux/mman.h>
+#include <linux/hugetlb.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -521,6 +522,9 @@  void gmap_unlink(struct mm_struct *mm, unsigned long *table,
 	rcu_read_unlock();
 }
 
+static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
+			   unsigned long gaddr);
+
 /**
  * gmap_link - set up shadow page tables to connect a host to a guest address
  * @gmap: pointer to guest mapping meta data structure
@@ -541,6 +545,7 @@  int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
+	u64 unprot;
 	int rc;
 
 	BUG_ON(gmap_is_shadow(gmap));
@@ -598,12 +603,19 @@  int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
 				       vmaddr >> PMD_SHIFT, table);
 		if (!rc) {
 			if (pmd_large(*pmd)) {
-				*table = pmd_val(*pmd) &
-					_SEGMENT_ENTRY_HARDWARE_BITS_LARGE;
+				*table = (pmd_val(*pmd) &
+					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
+					| _SEGMENT_ENTRY_GMAP_UC;
 			} else
 				*table = pmd_val(*pmd) &
 					_SEGMENT_ENTRY_HARDWARE_BITS;
 		}
+	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
+		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
+		unprot = *table & _SEGMENT_ENTRY_HARDWARE_BITS_LARGE;
+		unprot &= ~_SEGMENT_ENTRY_PROTECT;
+		unprot |= _SEGMENT_ENTRY_GMAP_UC;
+		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
 	}
 	spin_unlock(&gmap->guest_table_lock);
 	spin_unlock(ptl);
@@ -931,11 +943,23 @@  static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
 {
 	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
 	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
+	pmd_t new = *pmdp;
 
 	/* Fixup needed */
 	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
 		return -EAGAIN;
 
+	if (prot == PROT_NONE && !pmd_i) {
+		pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
+		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
+	}
+
+	if (prot == PROT_READ && !pmd_p) {
+		pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
+		pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
+		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
+	}
+
 	if (bits & GMAP_NOTIFY_MPROT)
 		pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
 
@@ -2222,6 +2246,13 @@  void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
 }
 EXPORT_SYMBOL_GPL(ptep_notify);
 
+static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
+			     unsigned long gaddr)
+{
+	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
+	gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
+}
+
 /**
  * pmdp_notify - call all invalidation callbacks for a specific pmd
  * @mm: pointer to the process mm_struct
@@ -2252,6 +2283,31 @@  void pmdp_notify(struct mm_struct *mm, unsigned long vmaddr)
 }
 EXPORT_SYMBOL_GPL(pmdp_notify);
 
+/**
+ * gmap_pmdp_xchg - exchange a gmap pmd with another
+ * @gmap: pointer to the guest address space structure
+ * @pmdp: pointer to the pmd entry
+ * @new: replacement entry
+ * @gaddr: the affected guest address
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
+			   unsigned long gaddr)
+{
+	gaddr &= HPAGE_MASK;
+	pmdp_notify_gmap(gmap, pmdp, gaddr);
+	if (MACHINE_HAS_TLB_GUEST)
+		__pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
+			    IDTE_GLOBAL);
+	else if (MACHINE_HAS_IDTE)
+		__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
+	else
+		__pmdp_csp(pmdp);
+	*pmdp = new;
+}
+
 static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
 			    int purge)
 {
@@ -2372,6 +2428,71 @@  void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
 }
 EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
 
+/**
+ * gmap_test_and_clear_dirty_segment - test and reset segment dirty status
+ * @gmap: pointer to guest address space
+ * @pmdp: pointer to the pmd to be tested
+ * @gaddr: virtual address in the guest address space
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+bool gmap_test_and_clear_dirty_segment(struct gmap *gmap, pmd_t *pmdp,
+				       unsigned long gaddr)
+{
+	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+		return false;
+
+	/* Already protected memory, which did not change is clean */
+	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
+	    !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
+		return false;
+
+	/* Clear UC indication and reset protection */
+	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
+	gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
+	return true;
+}
+
+/**
+ * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
+ * @gmap: pointer to guest address space
+ * @bitmap: dirty bitmap for this pmd
+ * @gaddr: virtual address in the guest address space
+ * @vmaddr: virtual address in the host address space
+ *
+ * This function is assumed to be called with the guest_table_lock
+ * held.
+ */
+void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
+			     unsigned long gaddr, unsigned long vmaddr)
+{
+	int i = 0;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	spinlock_t *ptl;
+
+	pmdp = gmap_pmd_op_walk(gmap, gaddr);
+	if (!pmdp)
+		return;
+
+	if (pmd_large(*pmdp)) {
+		if (gmap_test_and_clear_dirty_segment(gmap, pmdp, gaddr))
+			bitmap_fill(bitmap, _PAGE_ENTRIES);
+	} else {
+		for (; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
+			ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
+			if (!ptep)
+				continue;
+			if (test_and_clear_guest_dirty(gmap->mm, vmaddr, ptep))
+				set_bit(i, bitmap);
+			spin_unlock(ptl);
+		}
+	}
+	gmap_pmd_op_end(gmap, pmdp);
+}
+EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
+
 static inline void thp_split_mm(struct mm_struct *mm)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7bdb15fc5487..580caa5be956 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -708,40 +708,14 @@  void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 /*
  * Test and reset if a guest page is dirty
  */
-bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
+bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep)
 {
-	spinlock_t *ptl;
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
 	pgste_t pgste;
-	pte_t *ptep;
 	pte_t pte;
 	bool dirty;
 	int nodat;
 
-	pgd = pgd_offset(mm, addr);
-	p4d = p4d_alloc(mm, pgd, addr);
-	if (!p4d)
-		return false;
-	pud = pud_alloc(mm, p4d, addr);
-	if (!pud)
-		return false;
-	pmd = pmd_alloc(mm, pud, addr);
-	if (!pmd)
-		return false;
-	/* We can't run guests backed by huge pages, but userspace can
-	 * still set them up and then try to migrate them without any
-	 * migration support.
-	 */
-	if (pmd_large(*pmd))
-		return true;
-
-	ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl);
-	if (unlikely(!ptep))
-		return false;
-
 	pgste = pgste_get_lock(ptep);
 	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
 	pgste_val(pgste) &= ~PGSTE_UC_BIT;
@@ -757,8 +731,6 @@  bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr)
 		*ptep = pte;
 	}
 	pgste_set_unlock(ptep, pgste);
-
-	spin_unlock(ptl);
 	return dirty;
 }
 EXPORT_SYMBOL_GPL(test_and_clear_guest_dirty);