[RFC,v8,50/56] KVM: SEV: Handle restricted memory invalidations for SNP

Message ID: 20230220183847.59159-51-michael.roth@amd.com
State: New
Series: Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Michael Roth Feb. 20, 2023, 6:38 p.m. UTC
Implement a platform hook to do the work of restoring the direct map
entries and cleaning up RMP table entries for restricted memory that is
being freed back to the host.

Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/kvm/svm/sev.c | 62 ++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/svm/svm.c |  1 +
 arch/x86/kvm/svm/svm.h |  1 +
 3 files changed, 64 insertions(+)
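
For context, the cleanup the commit message describes amounts to two steps per page, sketched roughly below. This is a conceptual illustration only: the helper names (rmp_make_shared(), restore_direct_map()) are assumptions based on other patches in this series, and the real work is done by snp_make_page_shared(), which the new hook calls in the diff that follows.

	/*
	 * Conceptual sketch of what "freeing restricted memory back to
	 * the host" involves on SNP. Helper names are assumed from
	 * other patches in this series and may differ in detail.
	 */
	static int snp_restore_page_sketch(kvm_pfn_t pfn, int level)
	{
		int rc;

		/*
		 * 1) Flip the RMP entry back to shared/hypervisor-owned
		 *    so the host can touch the page again without
		 *    triggering an RMP #PF.
		 */
		rc = rmp_make_shared(pfn, level);
		if (rc)
			return rc;

		/*
		 * 2) Re-add the page(s) to the kernel direct map, which
		 *    was unmapped while the memory was guest-private.
		 *    (Hypothetical helper; signature assumed.)
		 */
		return restore_direct_map(pfn, page_level_size(level) >> PAGE_SHIFT);
	}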

Comments

Zhi Wang March 1, 2023, 10:41 a.m. UTC | #1
On Mon, 20 Feb 2023 12:38:41 -0600
Michael Roth <michael.roth@amd.com> wrote:

> Implement a platform hook to do the work of restoring the direct map
> entries and cleaning up RMP table entries for restricted memory that is
> being freed back to the host.
> 
> Signed-off-by: Michael Roth <michael.roth@amd.com>
> ---
>  arch/x86/kvm/svm/sev.c | 62 ++++++++++++++++++++++++++++++++++++++++++
>  arch/x86/kvm/svm/svm.c |  1 +
>  arch/x86/kvm/svm/svm.h |  1 +
>  3 files changed, 64 insertions(+)
> 
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 7a74a92cb39a..bedec90d034f 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -4509,3 +4509,65 @@ bool sev_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *priv
>  
>  	return true;
>  }
> +
> +void sev_invalidate_private_range(struct kvm_memory_slot *slot, gfn_t start, gfn_t end)
> +{
> +	gfn_t gfn = start;
> +
> +	if (!sev_snp_guest(slot->kvm))
> +		return;
> +
> +	if (!kvm_slot_can_be_private(slot)) {
> +		pr_warn_ratelimited("SEV: Memslot for GFN: 0x%llx is not private.\n",
> +				    gfn);
> +		return;
> +	}
> +

This is a generic check that applies to both SNP and TDX; it should be moved to
kvm_restrictedmem_invalidate_begin().

[... remainder of quoted patch trimmed; the full diff appears under "Patch" below ...]
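
To illustrate the suggested hoist, here is a minimal sketch of the generic check done once in common code, before dispatching to the vendor hook. The signature of kvm_restrictedmem_invalidate_begin(), the restrictedmem_range_is_valid() helper, and the kvm_arch_invalidate_restricted_mem() dispatch are assumptions based on the restrictedmem (UPM) series this patch set builds on, and may not match the tree exactly.

	/*
	 * Hypothetical sketch: perform the "can this slot be private?"
	 * check once in common code instead of in each vendor
	 * implementation. Names and plumbing are assumptions based on
	 * the restrictedmem (UPM) series.
	 */
	static void kvm_restrictedmem_invalidate_begin(struct restrictedmem_notifier *notifier,
						       pgoff_t start, pgoff_t end)
	{
		struct kvm_memory_slot *slot = container_of(notifier,
							    struct kvm_memory_slot,
							    notifier);
		gfn_t gfn_start, gfn_end;

		if (!restrictedmem_range_is_valid(slot, start, end,
						  &gfn_start, &gfn_end))
			return;

		/* Generic check, valid for any vendor backend (SNP, TDX, ...). */
		if (WARN_ON_ONCE(!kvm_slot_can_be_private(slot)))
			return;

		/* The vendor hook no longer needs to revalidate the slot. */
		kvm_arch_invalidate_restricted_mem(slot, gfn_start, gfn_end);
	}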
Patch

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 7a74a92cb39a..bedec90d034f 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -4509,3 +4509,65 @@ bool sev_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *priv
 
 	return true;
 }
+
+void sev_invalidate_private_range(struct kvm_memory_slot *slot, gfn_t start, gfn_t end)
+{
+	gfn_t gfn = start;
+
+	if (!sev_snp_guest(slot->kvm))
+		return;
+
+	if (!kvm_slot_can_be_private(slot)) {
+		pr_warn_ratelimited("SEV: Memslot for GFN: 0x%llx is not private.\n",
+				    gfn);
+		return;
+	}
+
+	while (gfn <= end) {
+		gpa_t gpa = gfn_to_gpa(gfn);
+		int level = PG_LEVEL_4K;
+		int order, rc;
+		kvm_pfn_t pfn;
+
+		rc = kvm_restrictedmem_get_pfn(slot, gfn, &pfn, &order);
+		if (rc) {
+			pr_warn_ratelimited("SEV: Failed to retrieve restricted PFN for GFN 0x%llx, rc: %d\n",
+					    gfn, rc);
+			gfn++;
+			continue;
+		}
+
+		if (order) {
+			int rmp_level;
+
+			if (IS_ALIGNED(gpa, page_level_size(PG_LEVEL_2M)) &&
+			    gpa + page_level_size(PG_LEVEL_2M) <= gfn_to_gpa(end))
+				level = PG_LEVEL_2M;
+			else
+				pr_debug("%s: GPA 0x%llx is not aligned to 2M, skipping 2M directmap restoration\n",
+					 __func__, gpa);
+
+			/*
+			 * TODO: It may still be possible to restore 2M mapping here,
+			 * but keep it simple for now.
+			 */
+			if (level == PG_LEVEL_2M &&
+			    (!snp_lookup_rmpentry(pfn, &rmp_level) || rmp_level == PG_LEVEL_4K)) {
+				pr_debug("%s: PFN 0x%llx is not mapped as 2M private range, skipping 2M directmap restoration\n",
+					 __func__, pfn);
+				level = PG_LEVEL_4K;
+			}
+		}
+
+		pr_debug("%s: GPA %llx PFN %llx order %d level %d\n",
+			 __func__, gpa, pfn, order, level);
+		rc = snp_make_page_shared(slot->kvm, gpa, pfn, level);
+		if (rc)
+			pr_err("SEV: Failed to restore page to shared, GPA: 0x%llx PFN: 0x%llx order: %d rc: %d\n",
+			       gpa, pfn, order, rc);
+
+		gfn += page_level_size(level) >> PAGE_SHIFT;
+		put_page(pfn_to_page(pfn));
+		cond_resched();
+	}
+}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 18e4a6c17d11..3fe5f13b5f3a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4862,6 +4862,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.adjust_mapping_level = sev_adjust_mapping_level,
 	.update_mem_attr = sev_update_mem_attr,
 	.fault_is_private = sev_fault_is_private,
+	.invalidate_restricted_mem = sev_invalidate_private_range,
 };
 
 /*
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 97038afa8020..857b674e68f0 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -727,6 +727,7 @@ void handle_rmp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
 void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
 int sev_update_mem_attr(struct kvm_memory_slot *slot, unsigned int attr,
 			gfn_t start, gfn_t end);
+void sev_invalidate_private_range(struct kvm_memory_slot *slot, gfn_t start, gfn_t end);
 
 bool sev_fault_is_private(struct kvm *kvm, gpa_t gpa, u64 error_code, bool *private_fault);
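
Finally, a standalone illustration of the level-selection arithmetic in sev_invalidate_private_range(): a 2M step is taken only when the GPA is 2M-aligned and the whole 2M region fits inside the invalidated range. The toy below compiles in user space, treats end as exclusive for simplicity, and mirrors the kernel constants; it is not kernel code.

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define SIZE_2M		(1ULL << 21)	/* page_level_size(PG_LEVEL_2M) */
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	int main(void)
	{
		/* GFN range [0x1ff, 0x600), mirroring the loop's GPA math. */
		uint64_t gfn = 0x1ff, end = 0x600;

		while (gfn < end) {
			uint64_t gpa = gfn << PAGE_SHIFT;
			/*
			 * 2M step only if the GPA is 2M-aligned and the
			 * whole 2M region fits in the invalidated range.
			 */
			int use_2m = IS_ALIGNED(gpa, SIZE_2M) &&
				     gpa + SIZE_2M <= (end << PAGE_SHIFT);

			printf("gfn 0x%llx -> %s\n",
			       (unsigned long long)gfn, use_2m ? "2M" : "4K");
			gfn += (use_2m ? SIZE_2M : (1ULL << PAGE_SHIFT)) >> PAGE_SHIFT;
		}
		return 0;
	}

Output: gfn 0x1ff takes a 4K step (unaligned GPA), then 0x200 and 0x400 each take a 2M step, matching the patch's gfn += page_level_size(level) >> PAGE_SHIFT stride.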