
[RFC,09/16] KVM: Protected memory extension

Message ID 20200522125214.31348-10-kirill.shutemov@linux.intel.com (mailing list archive)
State New, archived
Series KVM protected memory extension

Commit Message

Kirill A. Shutemov May 22, 2020, 12:52 p.m. UTC
Add infrastructure that handles protected memory extension.

Arch-specific code has to provide hypercalls and define non-zero
VM_KVM_PROTECTED.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/kvm_host.h |   4 ++
 mm/mprotect.c            |   1 +
 virt/kvm/kvm_main.c      | 131 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 136 insertions(+)
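
As the message notes, arch-specific code has to provide the hypercalls and a
non-zero VM_KVM_PROTECTED. A minimal sketch of what the arch side could look
like follows; the config symbol and the choice of vm_flags bit are assumptions
for illustration, not taken from this series:

/*
 * Illustrative only: one way an architecture could provide the non-zero
 * VM_KVM_PROTECTED this patch depends on. CONFIG_HAVE_KVM_PROTECTED_MEMORY
 * and the use of VM_HIGH_ARCH_4 are placeholders, not names from this series.
 */
#ifdef CONFIG_HAVE_KVM_PROTECTED_MEMORY
#define VM_KVM_PROTECTED	VM_HIGH_ARCH_4	/* any spare vm_flags bit */
#else
#define VM_KVM_PROTECTED	0	/* kvm_protect_memory() then returns -KVM_ENOSYS */
#endif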

Comments

Vitaly Kuznetsov May 25, 2020, 3:26 p.m. UTC | #1
"Kirill A. Shutemov" <kirill@shutemov.name> writes:

> Add infrastructure that handles protected memory extension.
>
> Arch-specific code has to provide hypercalls and define non-zero
> VM_KVM_PROTECTED.
>
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> ---
>  include/linux/kvm_host.h |   4 ++
>  mm/mprotect.c            |   1 +
>  virt/kvm/kvm_main.c      | 131 +++++++++++++++++++++++++++++++++++++++
>  3 files changed, 136 insertions(+)
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index bd0bb600f610..d7072f6d6aa0 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -700,6 +700,10 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
>  void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
>  				   struct kvm_memory_slot *slot);
>  
> +int kvm_protect_all_memory(struct kvm *kvm);
> +int kvm_protect_memory(struct kvm *kvm,
> +		       unsigned long gfn, unsigned long npages, bool protect);
> +
>  int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
>  			    struct page **pages, int nr_pages);
>  
> diff --git a/mm/mprotect.c b/mm/mprotect.c
> index 494192ca954b..552be3b4c80a 100644
> --- a/mm/mprotect.c
> +++ b/mm/mprotect.c
> @@ -505,6 +505,7 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
>  	vm_unacct_memory(charged);
>  	return error;
>  }
> +EXPORT_SYMBOL_GPL(mprotect_fixup);
>  
>  /*
>   * pkey==-1 when doing a legacy mprotect()
> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
> index 530af95efdf3..07d45da5d2aa 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -155,6 +155,8 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
>  static unsigned long long kvm_createvm_count;
>  static unsigned long long kvm_active_vms;
>  
> +static int protect_memory(unsigned long start, unsigned long end, bool protect);
> +
>  __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
>  		unsigned long start, unsigned long end, bool blockable)
>  {
> @@ -1309,6 +1311,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
>  	if (r)
>  		goto out_bitmap;
>  
> +	if (mem->memory_size && kvm->mem_protected) {
> +		r = protect_memory(new.userspace_addr,
> +				   new.userspace_addr + new.npages * PAGE_SIZE,
> +				   true);
> +		if (r)
> +			goto out_bitmap;
> +	}
> +
>  	if (old.dirty_bitmap && !new.dirty_bitmap)
>  		kvm_destroy_dirty_bitmap(&old);
>  	return 0;
> @@ -2652,6 +2662,127 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
>  }
>  EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
>  
> +static int protect_memory(unsigned long start, unsigned long end, bool protect)
> +{
> +	struct mm_struct *mm = current->mm;
> +	struct vm_area_struct *vma, *prev;
> +	int ret;
> +
> +	if (down_write_killable(&mm->mmap_sem))
> +		return -EINTR;
> +
> +	ret = -ENOMEM;
> +	vma = find_vma(current->mm, start);
> +	if (!vma)
> +		goto out;
> +
> +	ret = -EINVAL;
> +	if (vma->vm_start > start)
> +		goto out;
> +
> +	if (start > vma->vm_start)
> +		prev = vma;
> +	else
> +		prev = vma->vm_prev;
> +
> +	ret = 0;
> +	while (true) {
> +		unsigned long newflags, tmp;
> +
> +		tmp = vma->vm_end;
> +		if (tmp > end)
> +			tmp = end;
> +
> +		newflags = vma->vm_flags;
> +		if (protect)
> +			newflags |= VM_KVM_PROTECTED;
> +		else
> +			newflags &= ~VM_KVM_PROTECTED;
> +
> +		/* The VMA has been handled as part of other memslot */
> +		if (newflags == vma->vm_flags)
> +			goto next;
> +
> +		ret = mprotect_fixup(vma, &prev, start, tmp, newflags);
> +		if (ret)
> +			goto out;
> +
> +next:
> +		start = tmp;
> +		if (start < prev->vm_end)
> +			start = prev->vm_end;
> +
> +		if (start >= end)
> +			goto out;
> +
> +		vma = prev->vm_next;
> +		if (!vma || vma->vm_start != start) {
> +			ret = -ENOMEM;
> +			goto out;
> +		}
> +	}
> +out:
> +	up_write(&mm->mmap_sem);
> +	return ret;
> +}
> +
> +int kvm_protect_memory(struct kvm *kvm,
> +		       unsigned long gfn, unsigned long npages, bool protect)
> +{
> +	struct kvm_memory_slot *memslot;
> +	unsigned long start, end;
> +	gfn_t numpages;
> +
> +	if (!VM_KVM_PROTECTED)
> +		return -KVM_ENOSYS;
> +
> +	if (!npages)
> +		return 0;
> +
> +	memslot = gfn_to_memslot(kvm, gfn);
> +	/* Not backed by memory. It's okay. */
> +	if (!memslot)
> +		return 0;
> +
> +	start = gfn_to_hva_many(memslot, gfn, &numpages);
> +	end = start + npages * PAGE_SIZE;
> +
> +	/* XXX: Share range across memory slots? */
> +	if (WARN_ON(numpages < npages))
> +		return -EINVAL;
> +
> +	return protect_memory(start, end, protect);
> +}
> +EXPORT_SYMBOL_GPL(kvm_protect_memory);
> +
> +int kvm_protect_all_memory(struct kvm *kvm)
> +{
> +	struct kvm_memslots *slots;
> +	struct kvm_memory_slot *memslot;
> +	unsigned long start, end;
> +	int i, ret = 0;
> +
> +	if (!VM_KVM_PROTECTED)
> +		return -KVM_ENOSYS;
> +
> +	mutex_lock(&kvm->slots_lock);
> +	kvm->mem_protected = true;

What will happen upon guest reboot? Do we need to unprotect everything
to make sure we'll be able to boot? Also, after the reboot how will the
guest know that it is protected and needs to unprotect things? -> see my
idea about converting KVM_HC_ENABLE_MEM_PROTECTED to a stateful MSR (but
we'll likely have to reset it upon reboot anyway).

> +	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
> +		slots = __kvm_memslots(kvm, i);
> +		kvm_for_each_memslot(memslot, slots) {
> +			start = memslot->userspace_addr;
> +			end = start + memslot->npages * PAGE_SIZE;
> +			ret = protect_memory(start, end, true);
> +			if (ret)
> +				goto out;
> +		}
> +	}
> +out:
> +	mutex_unlock(&kvm->slots_lock);
> +	return ret;
> +}
> +EXPORT_SYMBOL_GPL(kvm_protect_all_memory);
> +
>  void kvm_sigset_activate(struct kvm_vcpu *vcpu)
>  {
>  	if (!vcpu->sigset_active)
Kirill A. Shutemov May 25, 2020, 3:34 p.m. UTC | #2
On Mon, May 25, 2020 at 05:26:37PM +0200, Vitaly Kuznetsov wrote:
> "Kirill A. Shutemov" <kirill@shutemov.name> writes:
> 
> > [...]
> > +	mutex_lock(&kvm->slots_lock);
> > +	kvm->mem_protected = true;
> 
> What will happen upon guest reboot? Do we need to unprotect everything
> to make sure we'll be able to boot? Also, after the reboot how will the
> guest know that it is protected and needs to unprotect things? -> see my
> idea about converting KVM_HC_ENABLE_MEM_PROTECTED to a stateful MSR (but
> we'll likely have to reset it upon reboot anyway).

That's an extremely good question. I have not considered reboot; I tend to use
-no-reboot in my setup.

I'll think about how to deal with reboot. I don't know how it works now, so I
can't give a good answer yet.

There may not be a good solution: unprotecting memory on reboot means we
expose user data. We can wipe the data before unprotecting, but we should
not wipe the BIOS and anything else that is required on reboot. I don't know.

Huang, Kai June 3, 2020, 1:34 a.m. UTC | #3
On Mon, 2020-05-25 at 18:34 +0300, Kirill A. Shutemov wrote:
> On Mon, May 25, 2020 at 05:26:37PM +0200, Vitaly Kuznetsov wrote:
> > "Kirill A. Shutemov" <kirill@shutemov.name> writes:
> > 
> > > [...]
> > > +	mutex_lock(&kvm->slots_lock);
> > > +	kvm->mem_protected = true;
> > 
> > What will happen upon guest reboot? Do we need to unprotect everything
> > to make sure we'll be able to boot? Also, after the reboot how will the
> > guest know that it is protected and needs to unprotect things? -> see my
> > idea about converting KVM_HC_ENABLE_MEM_PROTECTED to a stateful MSR (but
> > we'll likely have to reset it upon reboot anyway).
> 
> That's an extremely good question. I have not considered reboot; I tend to use
> -no-reboot in my setup.
> 
> I'll think about how to deal with reboot. I don't know how it works now, so I
> can't give a good answer yet.
> 
> There may not be a good solution: unprotecting memory on reboot means we
> expose user data. We can wipe the data before unprotecting, but we should
> not wipe the BIOS and anything else that is required on reboot. I don't know.

If you let Qemu protect guest memory when creating the VM, instead of asking the
guest kernel to enable it when it boots, you won't have this problem. The guest
kernel then *queries* whether its memory is protected during boot. This is
consistent with SEV as well.
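
A minimal guest-side sketch of that query-at-boot model, assuming a
hypercall-based probe; KVM_HC_MEM_PROTECTED_STATUS and
kvm_detect_mem_protection() are invented names for illustration, not
interfaces defined by this series:

/*
 * Hypothetical guest boot-time probe. KVM_HC_MEM_PROTECTED_STATUS stands in
 * for whatever query interface (hypercall, MSR or CPUID bit) would be chosen.
 */
static bool kvm_mem_protected __ro_after_init;

static void __init kvm_detect_mem_protection(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_hypercall0(KVM_HC_MEM_PROTECTED_STATUS) == 1)
		kvm_mem_protected = true;
}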

Patch

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bd0bb600f610..d7072f6d6aa0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -700,6 +700,10 @@  void kvm_arch_flush_shadow_all(struct kvm *kvm);
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 				   struct kvm_memory_slot *slot);
 
+int kvm_protect_all_memory(struct kvm *kvm);
+int kvm_protect_memory(struct kvm *kvm,
+		       unsigned long gfn, unsigned long npages, bool protect);
+
 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 			    struct page **pages, int nr_pages);
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 494192ca954b..552be3b4c80a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -505,6 +505,7 @@  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	vm_unacct_memory(charged);
 	return error;
 }
+EXPORT_SYMBOL_GPL(mprotect_fixup);
 
 /*
  * pkey==-1 when doing a legacy mprotect()
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 530af95efdf3..07d45da5d2aa 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -155,6 +155,8 @@  static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+static int protect_memory(unsigned long start, unsigned long end, bool protect);
+
 __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 		unsigned long start, unsigned long end, bool blockable)
 {
@@ -1309,6 +1311,14 @@  int __kvm_set_memory_region(struct kvm *kvm,
 	if (r)
 		goto out_bitmap;
 
+	if (mem->memory_size && kvm->mem_protected) {
+		r = protect_memory(new.userspace_addr,
+				   new.userspace_addr + new.npages * PAGE_SIZE,
+				   true);
+		if (r)
+			goto out_bitmap;
+	}
+
 	if (old.dirty_bitmap && !new.dirty_bitmap)
 		kvm_destroy_dirty_bitmap(&old);
 	return 0;
@@ -2652,6 +2662,127 @@  void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
+static int protect_memory(unsigned long start, unsigned long end, bool protect)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma, *prev;
+	int ret;
+
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
+	ret = -ENOMEM;
+	vma = find_vma(current->mm, start);
+	if (!vma)
+		goto out;
+
+	ret = -EINVAL;
+	if (vma->vm_start > start)
+		goto out;
+
+	if (start > vma->vm_start)
+		prev = vma;
+	else
+		prev = vma->vm_prev;
+
+	ret = 0;
+	while (true) {
+		unsigned long newflags, tmp;
+
+		tmp = vma->vm_end;
+		if (tmp > end)
+			tmp = end;
+
+		newflags = vma->vm_flags;
+		if (protect)
+			newflags |= VM_KVM_PROTECTED;
+		else
+			newflags &= ~VM_KVM_PROTECTED;
+
+		/* The VMA has been handled as part of other memslot */
+		if (newflags == vma->vm_flags)
+			goto next;
+
+		ret = mprotect_fixup(vma, &prev, start, tmp, newflags);
+		if (ret)
+			goto out;
+
+next:
+		start = tmp;
+		if (start < prev->vm_end)
+			start = prev->vm_end;
+
+		if (start >= end)
+			goto out;
+
+		vma = prev->vm_next;
+		if (!vma || vma->vm_start != start) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+out:
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+
+int kvm_protect_memory(struct kvm *kvm,
+		       unsigned long gfn, unsigned long npages, bool protect)
+{
+	struct kvm_memory_slot *memslot;
+	unsigned long start, end;
+	gfn_t numpages;
+
+	if (!VM_KVM_PROTECTED)
+		return -KVM_ENOSYS;
+
+	if (!npages)
+		return 0;
+
+	memslot = gfn_to_memslot(kvm, gfn);
+	/* Not backed by memory. It's okay. */
+	if (!memslot)
+		return 0;
+
+	start = gfn_to_hva_many(memslot, gfn, &numpages);
+	end = start + npages * PAGE_SIZE;
+
+	/* XXX: Share range across memory slots? */
+	if (WARN_ON(numpages < npages))
+		return -EINVAL;
+
+	return protect_memory(start, end, protect);
+}
+EXPORT_SYMBOL_GPL(kvm_protect_memory);
+
+int kvm_protect_all_memory(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	unsigned long start, end;
+	int i, ret = 0;
+
+	if (!VM_KVM_PROTECTED)
+		return -KVM_ENOSYS;
+
+	mutex_lock(&kvm->slots_lock);
+	kvm->mem_protected = true;
+	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+		slots = __kvm_memslots(kvm, i);
+		kvm_for_each_memslot(memslot, slots) {
+			start = memslot->userspace_addr;
+			end = start + memslot->npages * PAGE_SIZE;
+			ret = protect_memory(start, end, true);
+			if (ret)
+				goto out;
+		}
+	}
+out:
+	mutex_unlock(&kvm->slots_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_protect_all_memory);
+
 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
 {
 	if (!vcpu->sigset_active)
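
For orientation, a sketch of how the arch-specific hypercall handling (not part
of this patch) might drive the interface added here. KVM_HC_ENABLE_MEM_PROTECTED
is the hypercall discussed in the thread above; the share/unshare names and the
dispatcher itself are assumptions for illustration:

/*
 * Illustrative only: an arch-side hypercall dispatcher built on the helpers
 * added above.
 */
static int handle_protected_memory_hypercall(struct kvm_vcpu *vcpu,
					     unsigned long nr,
					     unsigned long gfn,
					     unsigned long npages)
{
	switch (nr) {
	case KVM_HC_ENABLE_MEM_PROTECTED:
		/* Guest opts in: protect every memslot of this VM. */
		return kvm_protect_all_memory(vcpu->kvm);
	case KVM_HC_MEM_SHARE:
		/* Guest shares a range back with the host. */
		return kvm_protect_memory(vcpu->kvm, gfn, npages, false);
	case KVM_HC_MEM_UNSHARE:
		/* Guest re-protects a previously shared range. */
		return kvm_protect_memory(vcpu->kvm, gfn, npages, true);
	default:
		return -KVM_ENOSYS;
	}
}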