@@ -1254,6 +1254,7 @@ struct kvm_x86_ops {
bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
+ void (*commit_memory_region)(struct kvm *kvm, enum kvm_mr_change change);
int (*page_enc_status_hc)(struct kvm *kvm, unsigned long gpa,
unsigned long sz, unsigned long mode);
int (*get_page_enc_bitmap)(struct kvm *kvm,
@@ -1377,6 +1377,45 @@ static int sev_resize_page_enc_bitmap(struct kvm *kvm, unsigned long new_size)
return 0;
}
+/*
+ * Grow the SEV page encryption bitmap when a new memslot is created so
+ * that it covers up to the highest mapped guest PA.  Invoked from the
+ * common kvm_arch_commit_memory_region() path via kvm_x86_ops.
+ */
+void svm_commit_memory_region(struct kvm *kvm, enum kvm_mr_change change)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	gfn_t end = 0;
+
+	spin_lock(&kvm->mmu_lock);
+	if (change == KVM_MR_CREATE) {
+		slots = kvm_memslots(kvm);
+		kvm_for_each_memslot(memslot, slots) {
+			/*
+			 * KVM memslots is a sorted list, starting with
+			 * the highest mapped guest PA, so pick the first
+			 * slot that actually has pages mapped; an empty
+			 * slot (npages == 0) must not contribute its
+			 * bare base_gfn as the bitmap size.
+			 */
+			if (memslot->npages) {
+				end = memslot->base_gfn + memslot->npages;
+				break;
+			}
+		}
+	}
+	spin_unlock(&kvm->mmu_lock);
+
+	if (end) {
+		/*
+		 * NOTE: This callback is invoked in vm ioctl
+		 * set_user_memory_region, hence we can use a
+		 * mutex here.
+		 */
+		mutex_lock(&kvm->lock);
+		sev_resize_page_enc_bitmap(kvm, end);
+		mutex_unlock(&kvm->lock);
+	}
+}
+
int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
unsigned long npages, unsigned long enc)
{
@@ -4015,6 +4015,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.check_nested_events = svm_check_nested_events,
+ .commit_memory_region = svm_commit_memory_region,
.page_enc_status_hc = svm_page_enc_status_hc,
.get_page_enc_bitmap = svm_get_page_enc_bitmap,
.set_page_enc_bitmap = svm_set_page_enc_bitmap,
@@ -406,6 +406,7 @@ int svm_page_enc_status_hc(struct kvm *kvm, unsigned long gpa,
unsigned long npages, unsigned long enc);
int svm_get_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
int svm_set_page_enc_bitmap(struct kvm *kvm, struct kvm_page_enc_bitmap *bmap);
+void svm_commit_memory_region(struct kvm *kvm, enum kvm_mr_change change);
/* avic.c */
@@ -10133,6 +10133,11 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm_mmu_change_mmu_pages(kvm,
kvm_mmu_calculate_default_mmu_pages(kvm));
+ if (change == KVM_MR_CREATE || change == KVM_MR_DELETE) {
+ if (kvm_x86_ops.commit_memory_region)
+ kvm_x86_ops.commit_memory_region(kvm, change);
+ }
+
/*
* Dirty logging tracks sptes in 4k granularity, meaning that large
* sptes have to be split. If live migration is successful, the guest