diff mbox series

[RFC,v1,16/26] KVM: arm64: Add a field to indicate whether the guest page was pinned

Message ID 20240222161047.402609-17-tabba@google.com (mailing list archive)
State New, archived
Headers show
Series KVM: Restricted mapping of guest_memfd at the host and pKVM/arm64 support | expand

Commit Message

Fuad Tabba Feb. 22, 2024, 4:10 p.m. UTC
This new field is needed only during the transition phase from pinning
guest pages to using guestmem. Once pKVM moves fully to guestmem, the
field will be removed.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 arch/arm64/include/asm/kvm_host.h | 1 +
 arch/arm64/kvm/mmu.c              | 1 +
 arch/arm64/kvm/pkvm.c             | 6 ++++--
 3 files changed, 6 insertions(+), 2 deletions(-)
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 99bf2b534ff8..ab61c3ecba0c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -210,6 +210,7 @@  struct kvm_guest_page {
 	struct rb_node		node;
 	struct page		*page;
 	u64			ipa;
+	bool			is_pinned;
 };
 
 typedef unsigned int pkvm_handle_t;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index ae6f65717178..391d168e95d0 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1502,6 +1502,7 @@  static int pkvm_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
 	ppage->page = page;
 	ppage->ipa = fault_ipa;
+	ppage->is_pinned = true;
 	WARN_ON(insert_ppage(kvm, ppage));
 	write_unlock(&kvm->mmu_lock);
 
diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c
index 0dbde37d21d0..bfd4858a7bd1 100644
--- a/arch/arm64/kvm/pkvm.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -271,7 +271,8 @@  void pkvm_destroy_hyp_vm(struct kvm *host_kvm)
 					  page_to_pfn(ppage->page)));
 		cond_resched();
 
-		unpin_user_pages_dirty_lock(&ppage->page, 1, true);
+		if (ppage->is_pinned)
+			unpin_user_pages_dirty_lock(&ppage->page, 1, true);
 		node = rb_next(node);
 		rb_erase(&ppage->node, &host_kvm->arch.pkvm.pinned_pages);
 		kfree(ppage);
@@ -362,6 +363,7 @@  void pkvm_host_reclaim_page(struct kvm *host_kvm, phys_addr_t ipa)
 				  page_to_pfn(ppage->page)));
 
 	account_locked_vm(mm, 1, false);
-	unpin_user_pages_dirty_lock(&ppage->page, 1, true);
+	if (ppage->is_pinned)
+		unpin_user_pages_dirty_lock(&ppage->page, 1, true);
 	kfree(ppage);
 }