@@ -1051,20 +1051,18 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 	}
 
 	while (length > 0) {
-		kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
+		struct page *page = __gfn_to_page(kvm, gfn, write);
 		void *maddr;
 		unsigned long num_tags;
-		struct page *page;
 
-		if (is_error_noslot_pfn(pfn)) {
-			ret = -EFAULT;
-			goto out;
-		}
-
-		page = pfn_to_online_page(pfn);
 		if (!page) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		if (!pfn_to_online_page(page_to_pfn(page))) {
 			/* Reject ZONE_DEVICE memory */
-			kvm_release_pfn_clean(pfn);
+			kvm_release_page_unused(page);
 			ret = -EFAULT;
 			goto out;
 		}
@@ -1078,7 +1076,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 				/* No tags in memory, so write zeros */
 				num_tags = MTE_GRANULES_PER_PAGE -
 					clear_user(tags, MTE_GRANULES_PER_PAGE);
-			kvm_release_pfn_clean(pfn);
+			kvm_release_page_clean(page);
 		} else {
 			/*
 			 * Only locking to serialise with a concurrent
@@ -1093,8 +1091,7 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 			if (num_tags != MTE_GRANULES_PER_PAGE)
 				mte_clear_page_tags(maddr);
 			set_page_mte_tagged(page);
-
-			kvm_release_pfn_dirty(pfn);
+			kvm_release_page_dirty(page);
 		}
 
 		if (num_tags != MTE_GRANULES_PER_PAGE) {
Use __gfn_to_page() instead of gfn_to_pfn_prot() when copying MTE tags
between the guest and userspace.  This will eventually allow removing
gfn_to_pfn_prot(), gfn_to_pfn(), kvm_pfn_to_refcounted_page(), and
related APIs.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/arm64/kvm/guest.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)
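
[Editor's note] For readers less familiar with the page-reference helpers
involved, the fragment below is a condensed sketch of the lookup/release
pattern the loop follows after this patch. It is illustrative only, not the
full kvm_vm_ioctl_mte_copy_tags() loop: the tag-copy calls are paraphrased
as comments, and the page_address() mapping step is an assumption inferred
from context rather than a line quoted from the hunks above.

	struct page *page = __gfn_to_page(kvm, gfn, write);
	void *maddr;

	if (!page) {
		/* gfn could not be resolved to a refcounted page */
		ret = -EFAULT;
		goto out;
	}

	if (!pfn_to_online_page(page_to_pfn(page))) {
		/* Reject ZONE_DEVICE memory */
		kvm_release_page_unused(page);	/* acquired but never accessed */
		ret = -EFAULT;
		goto out;
	}

	maddr = page_address(page);	/* assumed mapping step, elided from the diff */

	if (!write) {
		/* ... copy tags from the page to userspace ... */
		kvm_release_page_clean(page);	/* read side: page contents unchanged */
	} else {
		/* ... copy tags from userspace into the page ... */
		kvm_release_page_dirty(page);	/* write side: tags were modified */
	}

The net effect is that every release call now takes the struct page returned
by __gfn_to_page() rather than a raw pfn, which is what allows the pfn-based
helpers named in the changelog to be retired later.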