
[11/12] KVM: x86: add KVM_MEM_X86_SMRAM memory slot flag

Message ID 1431084034-8425-12-git-send-email-pbonzini@redhat.com (mailing list archive)
State New, archived

Commit Message

Paolo Bonzini May 8, 2015, 11:20 a.m. UTC
This adds an arch-specific memslot flag that hides slots unless the
VCPU is in system management mode.

Some care is needed in order to limit the overhead of x86_gfn_to_memslot
when compared with gfn_to_memslot.  Thankfully, we have __gfn_to_memslot
and search_memslots which are the same, so we can add some extra output
to search_memslots.  The compiler will optimize it as dead code in
__gfn_to_memslot, and will use it to thread jumps in x86_gfn_to_memslot.
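
As a standalone illustration of that point (a toy model, not the kernel
code; all toy_* names are invented here), an always-inlined helper that
reports success through an out-parameter lets each caller keep only the
checks it needs:

#include <stdbool.h>
#include <stddef.h>

/* Toy stand-in for search_memslots(): reports success through *found. */
static inline __attribute__((always_inline))
int *toy_search(int *table, int n, int key, bool *found)
{
	for (int i = 0; i < n; i++) {
		if (table[i] == key) {
			*found = true;
			return &table[i];
		}
	}
	*found = false;
	return NULL;
}

/* Like __gfn_to_memslot(): ignores 'found', so after inlining the stores
 * to it are dead code and cost nothing. */
static int *toy_lookup(int *table, int n, int key)
{
	bool found;

	return toy_search(table, n, key, &found);
}

/* Like x86_gfn_to_memslot(): branches on 'found' rather than on the
 * returned pointer, so the compiler can thread the jump without having
 * to delete NULL pointer checks (which -fno-delete-null-pointer-checks
 * forbids).  'hide' stands in for the "SMRAM slot and not in SMM" test. */
static int *toy_lookup_filtered(int *table, int n, int key, bool hide)
{
	bool found;
	int *slot = toy_search(table, n, key, &found);

	if (found && hide)
		return NULL;
	return slot;
}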

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 Documentation/virtual/kvm/api.txt | 18 ++++++++++++------
 arch/x86/include/uapi/asm/kvm.h   |  3 +++
 arch/x86/kvm/smram.c              | 25 +++++++++++++++++++++++--
 include/linux/kvm_host.h          | 14 ++++++++++----
 virt/kvm/kvm_main.c               |  4 ++++
 5 files changed, 52 insertions(+), 12 deletions(-)
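
For context, userspace opts a slot into this behaviour through the usual
KVM_SET_USER_MEMORY_REGION ioctl.  A minimal sketch follows; the slot
number, addresses, size and the smram_backing buffer are made up for
illustration, and it assumes headers that already carry KVM_CAP_X86_SMM
and KVM_MEM_X86_SMRAM from this series:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Map a host buffer so that the guest sees it only while in SMM. */
static int map_smram(int kvm_fd, int vm_fd, void *smram_backing)
{
	struct kvm_userspace_memory_region region = {
		.slot            = 8,                 /* any unused slot */
		.flags           = KVM_MEM_X86_SMRAM, /* new flag from this patch */
		.guest_phys_addr = 0xa0000,           /* legacy SMRAM window */
		.memory_size     = 0x20000,           /* 128 KiB */
		.userspace_addr  = (__u64)(unsigned long)smram_backing,
	};

	/* The flag is only meaningful when SMM support is advertised. */
	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_SMM) <= 0)
		return -1;

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}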

Comments

Avi Kivity May 26, 2015, 6:45 p.m. UTC | #1
On 05/08/2015 02:20 PM, Paolo Bonzini wrote:
> This adds an arch-specific memslot flag that hides slots unless the
> VCPU is in system management mode.
>
> Some care is needed in order to limit the overhead of x86_gfn_to_memslot
> when compared with gfn_to_memslot.  Thankfully, we have __gfn_to_memslot
> and search_memslots which are the same, so we can add some extra output
> to search_memslots.  The compiler will optimize it as dead code in
> __gfn_to_memslot, and will use it to thread jumps in x86_gfn_to_memslot.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>   Documentation/virtual/kvm/api.txt | 18 ++++++++++++------
>   arch/x86/include/uapi/asm/kvm.h   |  3 +++
>   arch/x86/kvm/smram.c              | 25 +++++++++++++++++++++++--
>   include/linux/kvm_host.h          | 14 ++++++++++----
>   virt/kvm/kvm_main.c               |  4 ++++
>   5 files changed, 52 insertions(+), 12 deletions(-)
>
> diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
> index 51523b70b6b2..2bc99ae040da 100644
> --- a/Documentation/virtual/kvm/api.txt
> +++ b/Documentation/virtual/kvm/api.txt
> @@ -933,18 +933,24 @@ It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
>   be identical.  This allows large pages in the guest to be backed by large
>   pages in the host.
>   
> -The flags field supports two flags: KVM_MEM_LOG_DIRTY_PAGES and
> -KVM_MEM_READONLY.  The former can be set to instruct KVM to keep track of
> -writes to memory within the slot.  See KVM_GET_DIRTY_LOG ioctl to know how to
> -use it.  The latter can be set, if KVM_CAP_READONLY_MEM capability allows it,
> -to make a new slot read-only.  In this case, writes to this memory will be
> -posted to userspace as KVM_EXIT_MMIO exits.
> +The flags field supports two architecture-independent flags:
> +KVM_MEM_LOG_DIRTY_PAGES and KVM_MEM_READONLY.  The former can be set
> +to instruct KVM to keep track of writes to memory within the slot.
> +See KVM_GET_DIRTY_LOG ioctl to know how to use it.  The latter can
> +be set, if KVM_CAP_READONLY_MEM capability allows it, to make a new
> +slot read-only.  In this case, writes to this memory will be posted to
> +userspace as KVM_EXIT_MMIO exits.
>   
>   When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of
>   the memory region are automatically reflected into the guest.  For example, an
>   mmap() that affects the region will be made visible immediately.  Another
>   example is madvise(MADV_DROP).
>   
> +Each architecture can support additional architecture-specific flags.
> +Right now only one is defined.  On x86, if KVM_CAP_X86_SMM is available,
> +the KVM_MEM_X86_SMRAM flag hides the slot from VCPUs that are not in
> +system management mode.

Is this generic enough?  For example, a system could configure itself so 
that an SMRAM region goes to mmio, hiding real RAM.

I see two alternatives:

- have three states: SMM, !SMM, both
- define two tables: SMM, !SMM, both spanning the entire address space

you should probably document how dirty bitmap handling happens in the 
presence of SMM.
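
For illustration, the second alternative could look roughly like the toy
model below; every name in it is invented here, and nothing of the sort
exists in this series:

/* Toy model of the "two tables" idea: one complete memslot table per
 * address space (SMM and !SMM), selected by the VCPU state, instead of
 * a per-slot SMRAM flag. */
#include <stdbool.h>
#include <stddef.h>

enum { AS_NORMAL = 0, AS_SMM = 1, NR_AS = 2 };

struct toy_slot  { unsigned long base_gfn, npages; };
struct toy_table { struct toy_slot *slots; int nslots; };
struct toy_vm    { struct toy_table tables[NR_AS]; };
struct toy_vcpu  { struct toy_vm *vm; bool in_smm; };

/* Each lookup picks the table for the vcpu's current address space, so
 * SMRAM needs no "hidden" flag and dirty logging could be kept per
 * table as well. */
static struct toy_slot *toy_gfn_to_slot(struct toy_vcpu *vcpu, unsigned long gfn)
{
	struct toy_table *t = &vcpu->vm->tables[vcpu->in_smm ? AS_SMM : AS_NORMAL];

	for (int i = 0; i < t->nslots; i++) {
		struct toy_slot *s = &t->slots[i];

		if (gfn >= s->base_gfn && gfn < s->base_gfn + s->npages)
			return s;
	}
	return NULL;
}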

> +
>   It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
>   The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
>   allocation and is deprecated.
> diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
> index 30100a3c1bed..46df15bc844f 100644
> --- a/arch/x86/include/uapi/asm/kvm.h
> +++ b/arch/x86/include/uapi/asm/kvm.h
> @@ -45,6 +45,9 @@
>   #define __KVM_HAVE_XCRS
>   #define __KVM_HAVE_READONLY_MEM
>   
> +#define __KVM_ARCH_VALID_FLAGS		KVM_MEM_X86_SMRAM
> +#define KVM_MEM_X86_SMRAM		(1 << 24)
> +
>   /* Architectural interrupt line count. */
>   #define KVM_NR_INTERRUPTS 256
>   
> diff --git a/arch/x86/kvm/smram.c b/arch/x86/kvm/smram.c
> index 73616edab631..e7dd933673a4 100644
> --- a/arch/x86/kvm/smram.c
> +++ b/arch/x86/kvm/smram.c
> @@ -19,10 +19,23 @@
>   
>   #include <linux/module.h>
>   #include <linux/kvm_host.h>
> +#include "kvm_cache_regs.h"
>   
>   struct kvm_memory_slot *x86_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
>   {
> -	struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn);
> +	/* By using search_memslots directly the compiler can optimize away
> +	 * the "if (found)" check below.
> +	 *
> +	 * It cannot do the same for gfn_to_memslot because it is not inlined,
> +	 * and it also cannot do the same for __gfn_to_memslot because the
> +	 * kernel is compiled with -fno-delete-null-pointer-checks.
> +	 */
> +	bool found;
> +	struct kvm_memslots *memslots = kvm_memslots(vcpu->kvm);
> +	struct kvm_memory_slot *slot = search_memslots(memslots, gfn, &found);
> +
> +	if (found && unlikely(slot->flags & KVM_MEM_X86_SMRAM) && !is_smm(vcpu))
> +		return NULL;
>   
>   	return slot;
>   }
> @@ -112,7 +125,15 @@ EXPORT_SYMBOL_GPL(x86_read_guest);
>   int x86_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>   			      gpa_t gpa, unsigned long len)
>   {
> -	return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len);
> +	int r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len);
> +
> +	if (r < 0)
> +		return r;
> +
> +	/* Use slow path for reads and writes to SMRAM.  */
> +	if (ghc->memslot && (ghc->memslot->flags & KVM_MEM_X86_SMRAM))
> +		ghc->memslot = NULL;
> +	return r;
>   }
>   EXPORT_SYMBOL_GPL(x86_gfn_to_hva_cache_init);
>   
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index 19d09a08885b..ae7c60262369 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -810,16 +810,18 @@ static inline void kvm_guest_exit(void)
>    * gfn_to_memslot() itself isn't here as an inline because that would
>    * bloat other code too much.
>    */
> -static inline struct kvm_memory_slot *
> -search_memslots(struct kvm_memslots *slots, gfn_t gfn)
> +static __always_inline struct kvm_memory_slot *
> +search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool *found)
>   {
>   	int start = 0, end = slots->used_slots;
>   	int slot = atomic_read(&slots->lru_slot);
>   	struct kvm_memory_slot *memslots = slots->memslots;
>   
>   	if (gfn >= memslots[slot].base_gfn &&
> -	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
> +	    gfn < memslots[slot].base_gfn + memslots[slot].npages) {
> +		*found = true;
>   		return &memslots[slot];
> +	}
>   
>   	while (start < end) {
>   		slot = start + (end - start) / 2;
> @@ -833,16 +835,20 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn)
>   	if (gfn >= memslots[start].base_gfn &&
>   	    gfn < memslots[start].base_gfn + memslots[start].npages) {
>   		atomic_set(&slots->lru_slot, start);
> +		*found = true;
>   		return &memslots[start];
>   	}
>   
> +	*found = false;
>   	return NULL;
>   }
>   
>   static inline struct kvm_memory_slot *
>   __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
>   {
> -	return search_memslots(slots, gfn);
> +	bool found;
> +
> +	return search_memslots(slots, gfn, &found);
>   }
>   
>   static inline unsigned long
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 0fcc5d28f3a9..46bff2082479 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -716,6 +716,10 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
>   #ifdef __KVM_HAVE_READONLY_MEM
>   	valid_flags |= KVM_MEM_READONLY;
>   #endif
> +#ifdef __KVM_ARCH_VALID_FLAGS
> +	BUILD_BUG_ON(__KVM_ARCH_VALID_FLAGS & KVM_MEMSLOT_INVALID);
> +	valid_flags |= __KVM_ARCH_VALID_FLAGS;
> +#endif
>   
>   	if (mem->flags & ~valid_flags)
>   		return -EINVAL;

Paolo Bonzini May 27, 2015, 9:26 a.m. UTC | #2
On 26/05/2015 20:45, Avi Kivity wrote:
> Is this generic enough?  For example, a system could configure itself so
> that an SMRAM region goes to mmio, hiding real RAM.

That would work, because in !SMM you'd go to userspace and do MMIO
there.  But you're right that the flag by itself is not generic enough.
Your proposed alternative of defining two tables would be really neat to
implement in both KVM and QEMU.

Paolo

> 
> I see two alternatives:
> 
> - have three states: SMM, !SMM, both
> - define two tables: SMM, !SMM, both spanning the entire address space
> 
> you should probably document how dirty bitmap handling happens in the
> presence of SMM.
> 

Patch

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 51523b70b6b2..2bc99ae040da 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -933,18 +933,24 @@  It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
 be identical.  This allows large pages in the guest to be backed by large
 pages in the host.
 
-The flags field supports two flags: KVM_MEM_LOG_DIRTY_PAGES and
-KVM_MEM_READONLY.  The former can be set to instruct KVM to keep track of
-writes to memory within the slot.  See KVM_GET_DIRTY_LOG ioctl to know how to
-use it.  The latter can be set, if KVM_CAP_READONLY_MEM capability allows it,
-to make a new slot read-only.  In this case, writes to this memory will be
-posted to userspace as KVM_EXIT_MMIO exits.
+The flags field supports two architecture-independent flags:
+KVM_MEM_LOG_DIRTY_PAGES and KVM_MEM_READONLY.  The former can be set
+to instruct KVM to keep track of writes to memory within the slot.
+See KVM_GET_DIRTY_LOG ioctl to know how to use it.  The latter can
+be set, if KVM_CAP_READONLY_MEM capability allows it, to make a new
+slot read-only.  In this case, writes to this memory will be posted to
+userspace as KVM_EXIT_MMIO exits.
 
 When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of
 the memory region are automatically reflected into the guest.  For example, an
 mmap() that affects the region will be made visible immediately.  Another
 example is madvise(MADV_DROP).
 
+Each architecture can support additional architecture-specific flags.
+Right now only one is defined.  On x86, if KVM_CAP_X86_SMM is available,
+the KVM_MEM_X86_SMRAM flag hides the slot from VCPUs that are not in
+system management mode.
+
 It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
 The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
 allocation and is deprecated.
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 30100a3c1bed..46df15bc844f 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -45,6 +45,9 @@ 
 #define __KVM_HAVE_XCRS
 #define __KVM_HAVE_READONLY_MEM
 
+#define __KVM_ARCH_VALID_FLAGS		KVM_MEM_X86_SMRAM
+#define KVM_MEM_X86_SMRAM		(1 << 24)
+
 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
 
diff --git a/arch/x86/kvm/smram.c b/arch/x86/kvm/smram.c
index 73616edab631..e7dd933673a4 100644
--- a/arch/x86/kvm/smram.c
+++ b/arch/x86/kvm/smram.c
@@ -19,10 +19,23 @@ 
 
 #include <linux/module.h>
 #include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
 
 struct kvm_memory_slot *x86_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
-	struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn);
+	/* By using search_memslots directly the compiler can optimize away
+	 * the "if (found)" check below.
+	 *
+	 * It cannot do the same for gfn_to_memslot because it is not inlined,
+	 * and it also cannot do the same for __gfn_to_memslot because the
+	 * kernel is compiled with -fno-delete-null-pointer-checks.
+	 */
+	bool found;
+	struct kvm_memslots *memslots = kvm_memslots(vcpu->kvm);
+	struct kvm_memory_slot *slot = search_memslots(memslots, gfn, &found);
+
+	if (found && unlikely(slot->flags & KVM_MEM_X86_SMRAM) && !is_smm(vcpu))
+		return NULL;
 
 	return slot;
 }
@@ -112,7 +125,15 @@  EXPORT_SYMBOL_GPL(x86_read_guest);
 int x86_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			      gpa_t gpa, unsigned long len)
 {
-	return kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len);
+	int r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, len);
+
+	if (r < 0)
+		return r;
+
+	/* Use slow path for reads and writes to SMRAM.  */
+	if (ghc->memslot && (ghc->memslot->flags & KVM_MEM_X86_SMRAM))
+		ghc->memslot = NULL;
+	return r;
 }
 EXPORT_SYMBOL_GPL(x86_gfn_to_hva_cache_init);
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 19d09a08885b..ae7c60262369 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -810,16 +810,18 @@  static inline void kvm_guest_exit(void)
  * gfn_to_memslot() itself isn't here as an inline because that would
  * bloat other code too much.
  */
-static inline struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn)
+static __always_inline struct kvm_memory_slot *
+search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool *found)
 {
 	int start = 0, end = slots->used_slots;
 	int slot = atomic_read(&slots->lru_slot);
 	struct kvm_memory_slot *memslots = slots->memslots;
 
 	if (gfn >= memslots[slot].base_gfn &&
-	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
+	    gfn < memslots[slot].base_gfn + memslots[slot].npages) {
+		*found = true;
 		return &memslots[slot];
+	}
 
 	while (start < end) {
 		slot = start + (end - start) / 2;
@@ -833,16 +835,20 @@  search_memslots(struct kvm_memslots *slots, gfn_t gfn)
 	if (gfn >= memslots[start].base_gfn &&
 	    gfn < memslots[start].base_gfn + memslots[start].npages) {
 		atomic_set(&slots->lru_slot, start);
+		*found = true;
 		return &memslots[start];
 	}
 
+	*found = false;
 	return NULL;
 }
 
 static inline struct kvm_memory_slot *
 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
 {
-	return search_memslots(slots, gfn);
+	bool found;
+
+	return search_memslots(slots, gfn, &found);
 }
 
 static inline unsigned long
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0fcc5d28f3a9..46bff2082479 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -716,6 +716,10 @@  static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
 #ifdef __KVM_HAVE_READONLY_MEM
 	valid_flags |= KVM_MEM_READONLY;
 #endif
+#ifdef __KVM_ARCH_VALID_FLAGS
+	BUILD_BUG_ON(__KVM_ARCH_VALID_FLAGS & KVM_MEMSLOT_INVALID);
+	valid_flags |= __KVM_ARCH_VALID_FLAGS;
+#endif
 
 	if (mem->flags & ~valid_flags)
 		return -EINVAL;