diff mbox series

[02/35] KVM: s390/interrupt: do not pin adapter interrupt pages

Message ID 20200207113958.7320-3-borntraeger@de.ibm.com (mailing list archive)
State New, archived
Headers show
Series KVM: s390: Add support for protected VMs | expand

Commit Message

Christian Borntraeger Feb. 7, 2020, 11:39 a.m. UTC
From: Ulrich Weigand <Ulrich.Weigand@de.ibm.com>

The adapter interrupt page containing the indicator bits is currently
pinned. That means that a guest with many devices can pin a lot of
memory pages in the host. This also complicates the reference tracking
which is needed for memory management handling of protected virtual
machines.
We can reuse the pte notifiers to "cache" the page without pinning it.

Signed-off-by: Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
Suggested-by: Andrea Arcangeli <aarcange@redhat.com>
[borntraeger@de.ibm.com: patch merging, splitting, fixing]
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
---
 arch/s390/include/asm/kvm_host.h |   4 +-
 arch/s390/kvm/interrupt.c        | 155 +++++++++++++++++++++++--------
 arch/s390/kvm/kvm-s390.c         |   4 +
 arch/s390/kvm/kvm-s390.h         |   2 +
 4 files changed, 123 insertions(+), 42 deletions(-)

Comments

David Hildenbrand Feb. 10, 2020, 12:26 p.m. UTC | #1
On 07.02.20 12:39, Christian Borntraeger wrote:
> From: Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
> 
> The adapter interrupt page containing the indicator bits is currently
> pinned. That means that a guest with many devices can pin a lot of
> memory pages in the host. This also complicates the reference tracking
> which is needed for memory management handling of protected virtual
> machines.
> We can reuse the pte notifiers to "cache" the page without pinning it.
> 
> Signed-off-by: Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
> Suggested-by: Andrea Arcangeli <aarcange@redhat.com>
> [borntraeger@de.ibm.com: patch merging, splitting, fixing]
> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
> ---

So, instead of pinning explicitly, look up the page address, cache it,
and glue its lifetime to the gmap table entry. When that entry is
changed, invalidate the cached page. On re-access, look up the page
again and register the gmap notifier for the table entry again.

[...]

>  #define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
> diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
> index c06c89d370a7..4bfb2f8fe57c 100644
> --- a/arch/s390/kvm/interrupt.c
> +++ b/arch/s390/kvm/interrupt.c
> @@ -28,6 +28,7 @@
>  #include <asm/switch_to.h>
>  #include <asm/nmi.h>
>  #include <asm/airq.h>
> +#include <linux/pagemap.h>
>  #include "kvm-s390.h"
>  #include "gaccess.h"
>  #include "trace-s390.h"
> @@ -2328,8 +2329,8 @@ static int register_io_adapter(struct kvm_device *dev,
>  		return -ENOMEM;
>  
>  	INIT_LIST_HEAD(&adapter->maps);
> -	init_rwsem(&adapter->maps_lock);
> -	atomic_set(&adapter->nr_maps, 0);
> +	spin_lock_init(&adapter->maps_lock);
> +	adapter->nr_maps = 0;
>  	adapter->id = adapter_info.id;
>  	adapter->isc = adapter_info.isc;
>  	adapter->maskable = adapter_info.maskable;
> @@ -2375,19 +2376,15 @@ static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
>  		ret = -EFAULT;
>  		goto out;
>  	}
> -	ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
> -	if (ret < 0)
> -		goto out;
> -	BUG_ON(ret != 1);
> -	down_write(&adapter->maps_lock);
> -	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
> +	spin_lock(&adapter->maps_lock);
> +	if (adapter->nr_maps < MAX_S390_ADAPTER_MAPS) {
> +		adapter->nr_maps++;
>  		list_add_tail(&map->list, &adapter->maps);

I do wonder if we should check for duplicates. The unmap path will only
remove exactly one entry. But maybe this can never happen or is already
handled on a higher layer.
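
For illustration, such a duplicate check in kvm_s390_adapter_map() could
look roughly like the sketch below (the helper name adapter_has_map()
and the -EEXIST return value are invented here, not part of the patch):

/* caller must hold adapter->maps_lock */
static bool adapter_has_map(struct s390_io_adapter *adapter, __u64 addr)
{
	struct s390_map_info *cur;

	list_for_each_entry(cur, &adapter->maps, list)
		if (cur->guest_addr == addr)
			return true;
	return false;
}

	spin_lock(&adapter->maps_lock);
	if (adapter_has_map(adapter, addr)) {
		ret = -EEXIST;
	} else if (adapter->nr_maps < MAX_S390_ADAPTER_MAPS) {
		adapter->nr_maps++;
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		ret = -EINVAL;
	}
	spin_unlock(&adapter->maps_lock);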

>  }
> @@ -2430,7 +2426,6 @@ void kvm_s390_destroy_adapters(struct kvm *kvm)
>  		list_for_each_entry_safe(map, tmp,
>  					 &kvm->arch.adapters[i]->maps, list) {
>  			list_del(&map->list);
> -			put_page(map->page);
>  			kfree(map);
>  		}
>  		kfree(kvm->arch.adapters[i]);

Between the gmap being removed in kvm_arch_vcpu_destroy() and
kvm_s390_destroy_adapters(), the entries would no longer properly get
invalidated. AFAIK, removing/freeing the gmap will not trigger any
notifiers.

Not sure if that's an issue (IOW, if we can have some very weird race).
But I guess we would have similar races already :)

> @@ -2690,6 +2685,31 @@ struct kvm_device_ops kvm_flic_ops = {
>  	.destroy = flic_destroy,
>  };
>  
> +void kvm_s390_adapter_gmap_notifier(struct gmap *gmap, unsigned long start,
> +				    unsigned long end)
> +{
> +	struct kvm *kvm = gmap->private;
> +	struct s390_map_info *map, *tmp;
> +	int i;
> +
> +	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
> +		struct s390_io_adapter *adapter = kvm->arch.adapters[i];
> +
> +		if (!adapter)
> +			continue;

I have to ask a very dumb question: How is kvm->arch.adapters[] protected?

I don't see any explicit locking e.g., on
flic_set_attr()->register_io_adapter().


[...]

> +static struct page *get_map_page(struct kvm *kvm,
> +				 struct s390_io_adapter *adapter,
> +				 u64 addr)
>  {
>  	struct s390_map_info *map;
> +	unsigned long uaddr;
> +	struct page *page;
> +	bool need_retry;
> +	int ret;
>  
>  	if (!adapter)
>  		return NULL;
> +retry:
> +	page = NULL;
> +	uaddr = 0;
> +	spin_lock(&adapter->maps_lock);
> +	list_for_each_entry(map, &adapter->maps, list)
> +		if (map->guest_addr == addr) {

Could it happen that we don't have a fitting entry in the list?

> +			uaddr = map->addr;
> +			page = map->page;
> +			if (!page)
> +				map->page = ERR_PTR(-EBUSY);
> +			else if (IS_ERR(page) || !page_cache_get_speculative(page)) {
> +				spin_unlock(&adapter->maps_lock);
> +				goto retry;
> +			}
> +			break;
> +		}

Can we please factor out looking up the list entry to a separate
function, to be called under lock? (and e.g., use it below as well)

spin_lock(&adapter->maps_lock);
entry = fancy_new_function();
if (!entry)
	return NULL;
uaddr = entry->addr;
page = entry->page;
if (!page)
	...
spin_unlock(&adapter->maps_lock);
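
Filled out, the suggested helper might look like this
(get_map_info_locked() is a made-up name; the posted patch keeps both
loops open-coded):

/* caller must hold adapter->maps_lock */
static struct s390_map_info *get_map_info_locked(struct s390_io_adapter *adapter,
						 u64 addr)
{
	struct s390_map_info *map;

	list_for_each_entry(map, &adapter->maps, list)
		if (map->guest_addr == addr)
			return map;
	return NULL;
}

Both list walks in get_map_page() could then call this under the lock.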


> +	spin_unlock(&adapter->maps_lock);
> +
> +	if (page)
> +		return page;
> +	if (!uaddr)
> +		return NULL;
>  
> -	list_for_each_entry(map, &adapter->maps, list) {
> -		if (map->guest_addr == addr)
> -			return map;
> +	down_read(&kvm->mm->mmap_sem);
> +	ret = set_pgste_bits(kvm->mm, uaddr, PGSTE_IN_BIT, PGSTE_IN_BIT);
> +	if (ret)
> +		goto fail;
> +	ret = get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
> +				    &page, NULL, NULL);
> +	if (ret < 1)
> +		page = NULL;
> +fail:
> +	up_read(&kvm->mm->mmap_sem);
> +	need_retry = true;
> +	spin_lock(&adapter->maps_lock);
> +	list_for_each_entry(map, &adapter->maps, list)
> +		if (map->guest_addr == addr) {

Could it happen that our entry is suddenly no longer in the list?

> +			if (map->page == ERR_PTR(-EBUSY)) {
> +				map->page = page;
> +				need_retry = false;
> +			} else if (IS_ERR(map->page)) {

else if (map->page == ERR_PTR(-EAGAIN))

or simply "else" (every other value would be a BUG_ON, right?)

/* race with a notifier - don't store the entry and retry */

> +				map->page = NULL;
> +			}

> +			break;
> +		}
> +	spin_unlock(&adapter->maps_lock);
> +	if (need_retry) {
> +		if (page)
> +			put_page(page);
> +		goto retry;
>  	}
> -	return NULL;
> +
> +	return page;

Wow, this function is ... special. Took me way too long to figure out
what is going on here. We certainly need comments in there.

I can see that

- ERR_PTR(-EBUSY) is used when somebody is about to do the
  get_user_pages_remote(). Others have to loop until that is resolved.
- ERR_PTR(-EAGAIN) is used when the entry gets invalidated by the
  notifier while somebody is about to set it (while still
  ERR_PTR(-EBUSY)). The one currently processing the entry will
  eventually set it back to NULL.

I think we should make this clearer by only setting ERR_PTR(-EAGAIN) in
the notifier if already ERR_PTR(-EBUSY), along with a comment.

Can we document the values for map->page and how they are to be handled
right in the struct?
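
One way to document the states right in the struct, as suggested (the
comment wording is only an illustration; the field layout matches the
patch):

struct s390_map_info {
	struct list_head list;
	__u64 guest_addr;
	u64 addr;
	/*
	 * Cached host page backing guest_addr:
	 *   NULL               - not resolved; the next user looks it up
	 *   ERR_PTR(-EBUSY)    - somebody is resolving it via
	 *                        get_user_pages_remote(); others retry
	 *   ERR_PTR(-EAGAIN)   - invalidated by the gmap notifier while
	 *                        being resolved; the resolver resets it
	 *                        to NULL and retries
	 *   valid page pointer - usable after a successful
	 *                        page_cache_get_speculative()
	 */
	struct page *page;
};
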
David Hildenbrand Feb. 10, 2020, 12:40 p.m. UTC | #2
[...]

> +void kvm_s390_adapter_gmap_notifier(struct gmap *gmap, unsigned long start,
> +				    unsigned long end)
> +{
> +	struct kvm *kvm = gmap->private;
> +	struct s390_map_info *map, *tmp;
> +	int i;
> +
> +	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
> +		struct s390_io_adapter *adapter = kvm->arch.adapters[i];
> +
> +		if (!adapter)
> +			continue;
> +		spin_lock(&adapter->maps_lock);
> +		list_for_each_entry_safe(map, tmp, &adapter->maps, list) {

list_for_each_entry() is sufficient, we are not removing entries.

> +			if (start <= map->guest_addr && map->guest_addr < end) {
> +				if (IS_ERR(map->page))
> +					map->page = ERR_PTR(-EAGAIN);
> +				else
> +					map->page = NULL;
> +			}
Christian Borntraeger Feb. 10, 2020, 6:38 p.m. UTC | #3
On 10.02.20 13:26, David Hildenbrand wrote:
> On 07.02.20 12:39, Christian Borntraeger wrote:
>> From: Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
>>
>> The adapter interrupt page containing the indicator bits is currently
>> pinned. That means that a guest with many devices can pin a lot of
>> memory pages in the host. This also complicates the reference tracking
>> which is needed for memory management handling of protected virtual
>> machines.
>> We can reuse the pte notifiers to "cache" the page without pinning it.
>>
>> Signed-off-by: Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
>> Suggested-by: Andrea Arcangeli <aarcange@redhat.com>
>> [borntraeger@de.ibm.com: patch merging, splitting, fixing]
>> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
>> ---
> 
> So, instead of pinning explicitly, look up the page address, cache it,
> and glue its lifetime to the gmap table entry. When that entry is
> changed, invalidate the cached page. On re-access, look up the page
> again and register the gmap notifier for the table entry again.

I think I might want to split this into two parts.
part 1: a naive approach that always does get_user_pages_remote/put_page
part 2: do the complex caching

Ulrich mentioned that this actually could make the map/unmap a no-op as we
have the address and bit already in the irq route. In the end this might be
as fast as today's pinning as we replace a list walk with a page table walk.
Plus it would simplify the code. Will have a look if that is the case.
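
As a rough sketch, such a per-interrupt page-table walk could look like
this (resolve_ind_page() is an invented name, and this assumes the
indicator address in the irq route is a guest physical address):

static struct page *resolve_ind_page(struct kvm *kvm, u64 guest_addr)
{
	unsigned long uaddr;
	struct page *page;
	int ret;

	/*
	 * Translate the guest address from the irq route to a host
	 * address; assumes the caller holds the srcu read lock for
	 * the memslots.
	 */
	uaddr = gfn_to_hva(kvm, gpa_to_gfn(guest_addr));
	if (kvm_is_error_hva(uaddr))
		return NULL;

	down_read(&kvm->mm->mmap_sem);
	ret = get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
				    &page, NULL, NULL);
	up_read(&kvm->mm->mmap_sem);

	/* caller must put_page() when done with the indicator page */
	return (ret == 1) ? page : NULL;
}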

> 
> [...]
> 
>>  #define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
>> diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
>> index c06c89d370a7..4bfb2f8fe57c 100644
>> --- a/arch/s390/kvm/interrupt.c
>> +++ b/arch/s390/kvm/interrupt.c
>> @@ -28,6 +28,7 @@
>>  #include <asm/switch_to.h>
>>  #include <asm/nmi.h>
>>  #include <asm/airq.h>
>> +#include <linux/pagemap.h>
>>  #include "kvm-s390.h"
>>  #include "gaccess.h"
>>  #include "trace-s390.h"
>> @@ -2328,8 +2329,8 @@ static int register_io_adapter(struct kvm_device *dev,
>>  		return -ENOMEM;
>>  
>>  	INIT_LIST_HEAD(&adapter->maps);
>> -	init_rwsem(&adapter->maps_lock);
>> -	atomic_set(&adapter->nr_maps, 0);
>> +	spin_lock_init(&adapter->maps_lock);
>> +	adapter->nr_maps = 0;
>>  	adapter->id = adapter_info.id;
>>  	adapter->isc = adapter_info.isc;
>>  	adapter->maskable = adapter_info.maskable;
>> @@ -2375,19 +2376,15 @@ static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
>>  		ret = -EFAULT;
>>  		goto out;
>>  	}
>> -	ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
>> -	if (ret < 0)
>> -		goto out;
>> -	BUG_ON(ret != 1);
>> -	down_write(&adapter->maps_lock);
>> -	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
>> +	spin_lock(&adapter->maps_lock);
>> +	if (adapter->nr_maps < MAX_S390_ADAPTER_MAPS) {
>> +		adapter->nr_maps++;
>>  		list_add_tail(&map->list, &adapter->maps);
> 
> I do wonder if we should check for duplicates. The unmap path will only
> remove exactly one entry. But maybe this can never happen or is already
> handled on a higher layer.


This would be a broken userspace, but I also do not see what would break
in the host if this happens.


> 
>>  }
>> @@ -2430,7 +2426,6 @@ void kvm_s390_destroy_adapters(struct kvm *kvm)
>>  		list_for_each_entry_safe(map, tmp,
>>  					 &kvm->arch.adapters[i]->maps, list) {
>>  			list_del(&map->list);
>> -			put_page(map->page);
>>  			kfree(map);
>>  		}
>>  		kfree(kvm->arch.adapters[i]);
> 
> Between the gmap being removed in kvm_arch_vcpu_destroy() and
> kvm_s390_destroy_adapters(), the entries would no longer properly get
> invalidated. AFAIK, removing/freeing the gmap will not trigger any
> notifiers.
> 
> Not sure if that's an issue (IOW, if we can have some very weird race).
> But I guess we would have similar races already :)

This is only called when all file descriptors are closed and this also closes
all irq routes. So I guess no I/O should be going on any more. 

> 
>> @@ -2690,6 +2685,31 @@ struct kvm_device_ops kvm_flic_ops = {
>>  	.destroy = flic_destroy,
>>  };
>>  
>> +void kvm_s390_adapter_gmap_notifier(struct gmap *gmap, unsigned long start,
>> +				    unsigned long end)
>> +{
>> +	struct kvm *kvm = gmap->private;
>> +	struct s390_map_info *map, *tmp;
>> +	int i;
>> +
>> +	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
>> +		struct s390_io_adapter *adapter = kvm->arch.adapters[i];
>> +
>> +		if (!adapter)
>> +			continue;
> 
> I have to ask a very dumb question: How is kvm->arch.adapters[] protected?

We only add new ones, and they are removed at guest teardown, it seems.
[...]

Let me have a look if we can simplify this.
David Hildenbrand Feb. 10, 2020, 7:33 p.m. UTC | #4
> On 10.02.2020 at 19:41, Christian Borntraeger <borntraeger@de.ibm.com> wrote:
> 
> 
> 
>> On 10.02.20 13:26, David Hildenbrand wrote:
>>> On 07.02.20 12:39, Christian Borntraeger wrote:
>>> From: Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
>>> 
>>> The adapter interrupt page containing the indicator bits is currently
>>> pinned. That means that a guest with many devices can pin a lot of
>>> memory pages in the host. This also complicates the reference tracking
>>> which is needed for memory management handling of protected virtual
>>> machines.
>>> We can reuse the pte notifiers to "cache" the page without pinning it.
>>> 
>>> Signed-off-by: Ulrich Weigand <Ulrich.Weigand@de.ibm.com>
>>> Suggested-by: Andrea Arcangeli <aarcange@redhat.com>
>>> [borntraeger@de.ibm.com: patch merging, splitting, fixing]
>>> Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
>>> ---
>> 
>> So, instead of pinning explicitly, look up the page address, cache it,
>> and glue its lifetime to the gmap table entry. When that entry is
>> changed, invalidate the cached page. On re-access, look up the page
>> again and register the gmap notifier for the table entry again.
> 
> I think I might want to split this into two parts.
> part 1: a naive approach that always does get_user_pages_remote/put_page
> part 2: do the complex caching
> 
> Ulrich mentioned that this actually could make the map/unmap a no-op as we
> have the address and bit already in the irq route. In the end this might be
> as fast as today's pinning as we replace a list walk with a page table walk.
> Plus it would simplify the code. Will have a look if that is the case.

If we could simplify that heavily, that would be awesome!
diff mbox series

Patch

diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 73044545ecac..884503e05424 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -701,9 +701,9 @@  struct s390_io_adapter {
 	bool masked;
 	bool swap;
 	bool suppressible;
-	struct rw_semaphore maps_lock;
+	spinlock_t maps_lock;
 	struct list_head maps;
-	atomic_t nr_maps;
+	int nr_maps;
 };
 
 #define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c06c89d370a7..4bfb2f8fe57c 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -28,6 +28,7 @@ 
 #include <asm/switch_to.h>
 #include <asm/nmi.h>
 #include <asm/airq.h>
+#include <linux/pagemap.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include "trace-s390.h"
@@ -2328,8 +2329,8 @@  static int register_io_adapter(struct kvm_device *dev,
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&adapter->maps);
-	init_rwsem(&adapter->maps_lock);
-	atomic_set(&adapter->nr_maps, 0);
+	spin_lock_init(&adapter->maps_lock);
+	adapter->nr_maps = 0;
 	adapter->id = adapter_info.id;
 	adapter->isc = adapter_info.isc;
 	adapter->maskable = adapter_info.maskable;
@@ -2375,19 +2376,15 @@  static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
 		ret = -EFAULT;
 		goto out;
 	}
-	ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
-	if (ret < 0)
-		goto out;
-	BUG_ON(ret != 1);
-	down_write(&adapter->maps_lock);
-	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
+	spin_lock(&adapter->maps_lock);
+	if (adapter->nr_maps < MAX_S390_ADAPTER_MAPS) {
+		adapter->nr_maps++;
 		list_add_tail(&map->list, &adapter->maps);
 		ret = 0;
 	} else {
-		put_page(map->page);
 		ret = -EINVAL;
 	}
-	up_write(&adapter->maps_lock);
+	spin_unlock(&adapter->maps_lock);
 out:
 	if (ret)
 		kfree(map);
@@ -2403,18 +2400,17 @@  static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
 	if (!adapter || !addr)
 		return -EINVAL;
 
-	down_write(&adapter->maps_lock);
+	spin_lock(&adapter->maps_lock);
 	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
 		if (map->guest_addr == addr) {
 			found = 1;
-			atomic_dec(&adapter->nr_maps);
+			adapter->nr_maps--;
 			list_del(&map->list);
-			put_page(map->page);
 			kfree(map);
 			break;
 		}
 	}
-	up_write(&adapter->maps_lock);
+	spin_unlock(&adapter->maps_lock);
 
 	return found ? 0 : -EINVAL;
 }
@@ -2430,7 +2426,6 @@  void kvm_s390_destroy_adapters(struct kvm *kvm)
 		list_for_each_entry_safe(map, tmp,
 					 &kvm->arch.adapters[i]->maps, list) {
 			list_del(&map->list);
-			put_page(map->page);
 			kfree(map);
 		}
 		kfree(kvm->arch.adapters[i]);
@@ -2690,6 +2685,31 @@  struct kvm_device_ops kvm_flic_ops = {
 	.destroy = flic_destroy,
 };
 
+void kvm_s390_adapter_gmap_notifier(struct gmap *gmap, unsigned long start,
+				    unsigned long end)
+{
+	struct kvm *kvm = gmap->private;
+	struct s390_map_info *map, *tmp;
+	int i;
+
+	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
+		struct s390_io_adapter *adapter = kvm->arch.adapters[i];
+
+		if (!adapter)
+			continue;
+		spin_lock(&adapter->maps_lock);
+		list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
+			if (start <= map->guest_addr && map->guest_addr < end) {
+				if (IS_ERR(map->page))
+					map->page = ERR_PTR(-EAGAIN);
+				else
+					map->page = NULL;
+			}
+		}
+		spin_unlock(&adapter->maps_lock);
+	}
+}
+
 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
 {
 	unsigned long bit;
@@ -2699,19 +2719,71 @@  static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
 	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
 }
 
-static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
-					  u64 addr)
+static struct page *get_map_page(struct kvm *kvm,
+				 struct s390_io_adapter *adapter,
+				 u64 addr)
 {
 	struct s390_map_info *map;
+	unsigned long uaddr;
+	struct page *page;
+	bool need_retry;
+	int ret;
 
 	if (!adapter)
 		return NULL;
+retry:
+	page = NULL;
+	uaddr = 0;
+	spin_lock(&adapter->maps_lock);
+	list_for_each_entry(map, &adapter->maps, list)
+		if (map->guest_addr == addr) {
+			uaddr = map->addr;
+			page = map->page;
+			if (!page)
+				map->page = ERR_PTR(-EBUSY);
+			else if (IS_ERR(page) || !page_cache_get_speculative(page)) {
+				spin_unlock(&adapter->maps_lock);
+				goto retry;
+			}
+			break;
+		}
+	spin_unlock(&adapter->maps_lock);
+
+	if (page)
+		return page;
+	if (!uaddr)
+		return NULL;
 
-	list_for_each_entry(map, &adapter->maps, list) {
-		if (map->guest_addr == addr)
-			return map;
+	down_read(&kvm->mm->mmap_sem);
+	ret = set_pgste_bits(kvm->mm, uaddr, PGSTE_IN_BIT, PGSTE_IN_BIT);
+	if (ret)
+		goto fail;
+	ret = get_user_pages_remote(NULL, kvm->mm, uaddr, 1, FOLL_WRITE,
+				    &page, NULL, NULL);
+	if (ret < 1)
+		page = NULL;
+fail:
+	up_read(&kvm->mm->mmap_sem);
+	need_retry = true;
+	spin_lock(&adapter->maps_lock);
+	list_for_each_entry(map, &adapter->maps, list)
+		if (map->guest_addr == addr) {
+			if (map->page == ERR_PTR(-EBUSY)) {
+				map->page = page;
+				need_retry = false;
+			} else if (IS_ERR(map->page)) {
+				map->page = NULL;
+			}
+			break;
+		}
+	spin_unlock(&adapter->maps_lock);
+	if (need_retry) {
+		if (page)
+			put_page(page);
+		goto retry;
 	}
-	return NULL;
+
+	return page;
 }
 
 static int adapter_indicators_set(struct kvm *kvm,
@@ -2720,30 +2792,35 @@  static int adapter_indicators_set(struct kvm *kvm,
 {
 	unsigned long bit;
 	int summary_set, idx;
-	struct s390_map_info *info;
+	struct page *ind_page, *summary_page;
 	void *map;
 
-	info = get_map_info(adapter, adapter_int->ind_addr);
-	if (!info)
+	ind_page = get_map_page(kvm, adapter, adapter_int->ind_addr);
+	if (!ind_page)
 		return -1;
-	map = page_address(info->page);
-	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
-	set_bit(bit, map);
-	idx = srcu_read_lock(&kvm->srcu);
-	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
-	set_page_dirty_lock(info->page);
-	info = get_map_info(adapter, adapter_int->summary_addr);
-	if (!info) {
-		srcu_read_unlock(&kvm->srcu, idx);
+	summary_page = get_map_page(kvm, adapter, adapter_int->summary_addr);
+	if (!summary_page) {
+		put_page(ind_page);
 		return -1;
 	}
-	map = page_address(info->page);
-	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
-			  adapter->swap);
+
+	idx = srcu_read_lock(&kvm->srcu);
+	map = page_address(ind_page);
+	bit = get_ind_bit(adapter_int->ind_addr,
+			  adapter_int->ind_offset, adapter->swap);
+	set_bit(bit, map);
+	mark_page_dirty(kvm, adapter_int->ind_addr >> PAGE_SHIFT);
+	set_page_dirty_lock(ind_page);
+	map = page_address(summary_page);
+	bit = get_ind_bit(adapter_int->summary_addr,
+			  adapter_int->summary_offset, adapter->swap);
 	summary_set = test_and_set_bit(bit, map);
-	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
-	set_page_dirty_lock(info->page);
+	mark_page_dirty(kvm, adapter_int->summary_addr >> PAGE_SHIFT);
+	set_page_dirty_lock(summary_page);
 	srcu_read_unlock(&kvm->srcu, idx);
+
+	put_page(ind_page);
+	put_page(summary_page);
 	return summary_set ? 0 : 1;
 }
 
@@ -2765,9 +2842,7 @@  static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
 	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
 	if (!adapter)
 		return -1;
-	down_read(&adapter->maps_lock);
 	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
-	up_read(&adapter->maps_lock);
 	if ((ret > 0) && !adapter->masked) {
 		ret = kvm_s390_inject_airq(kvm, adapter);
 		if (ret == 0)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index e39f6ef97b09..1a48214ac507 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -219,6 +219,7 @@  static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
 
 static struct gmap_notifier gmap_notifier;
 static struct gmap_notifier vsie_gmap_notifier;
+static struct gmap_notifier adapter_gmap_notifier;
 debug_info_t *kvm_s390_dbf;
 
 /* Section: not file related */
@@ -299,6 +300,8 @@  int kvm_arch_hardware_setup(void)
 	gmap_register_pte_notifier(&gmap_notifier);
 	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
 	gmap_register_pte_notifier(&vsie_gmap_notifier);
+	adapter_gmap_notifier.notifier_call = kvm_s390_adapter_gmap_notifier;
+	gmap_register_pte_notifier(&adapter_gmap_notifier);
 	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
 				       &kvm_clock_notifier);
 	return 0;
@@ -308,6 +311,7 @@  void kvm_arch_hardware_unsetup(void)
 {
 	gmap_unregister_pte_notifier(&gmap_notifier);
 	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
+	gmap_unregister_pte_notifier(&adapter_gmap_notifier);
 	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
 					 &kvm_clock_notifier);
 }
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 6d9448dbd052..54c5eb4b275d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -367,6 +367,8 @@  int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 			struct kvm_s390_irq *s390irq);
 
 /* implemented in interrupt.c */
+void kvm_s390_adapter_gmap_notifier(struct gmap *gmap, unsigned long start,
+				    unsigned long end);
 int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
 int psw_extint_disabled(struct kvm_vcpu *vcpu);
 void kvm_s390_destroy_adapters(struct kvm *kvm);