
[2.5/3] KVM: gmem: limit hole-punching to ranges within the file

Message ID 20241108163228.374110-1-pbonzini@redhat.com (mailing list archive)
State New
Series KVM: gmem: track preparedness a page at a time

Commit Message

Paolo Bonzini Nov. 8, 2024, 4:32 p.m. UTC
Do not pass out-of-bounds values to kvm_gmem_mark_range_unprepared().

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 virt/kvm/guest_memfd.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
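As a sanity check on the arithmetic, here is a tiny standalone userspace
sketch (not part of the patch) that mirrors the old and new range
computations; PAGE_SHIFT and the example file size / request below are
made-up values for illustration only:

	/*
	 * Illustration only: compute the punch-hole pgoff range with and
	 * without clamping the request to the file size, as the patch does.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		long long size   = 1 << 20;	/* pretend i_size_read() returned 1 MiB */
		long long offset = 2 << 20;	/* hole starts past EOF */
		long long len    = 1 << 20;

		/* Old computation: no bounds check against the file size. */
		long long old_start = offset >> PAGE_SHIFT;
		long long old_end   = (offset + len) >> PAGE_SHIFT;

		/* New computation, mirroring the patch's clamp. */
		long long new_start = 0, new_end = 0;
		if (offset <= size) {
			long long clamped = size - offset < len ? size - offset : len;
			new_start = offset >> PAGE_SHIFT;
			new_end   = (offset + clamped) >> PAGE_SHIFT;
		}

		printf("file spans pgoffs [0, %lld)\n", size >> PAGE_SHIFT);
		printf("unclamped range:  [%lld, %lld)\n", old_start, old_end);
		printf("clamped range:    [%lld, %lld)\n", new_start, new_end);
		return 0;
	}

With these example numbers the unclamped range is [512, 768) even though
the file only covers pgoffs [0, 256), which is exactly the out-of-bounds
input to kvm_gmem_mark_range_unprepared() the patch avoids.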

	Sent separately because I thought this was also a bug in the current
	code; on closer look, though, the current code is fine because
	ksys_fallocate checks that there is no overflow.
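For context on the note above: the overflow check referred to lives in the
generic fallocate path (ksys_fallocate() -> vfs_fallocate() in fs/open.c).
Paraphrased from memory rather than quoted verbatim, it rejects bogus
ranges before ->fallocate(), and hence kvm_gmem_punch_hole(), ever runs:

	/*
	 * Paraphrased sketch of the vfs_fallocate() sanity checks (not a
	 * verbatim quote): negative or wrapping offset/len are rejected
	 * before the filesystem hook is called.
	 */
	if (offset < 0 || len <= 0)
		return -EINVAL;

	/* check for wrap through zero */
	if ((offset + len) > inode->i_sb->s_maxbytes || (offset + len) < 0)
		return -EFBIG;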

Patch

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 412d49c6d491..7dc89ceef782 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -324,10 +324,17 @@  static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
 static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 {
 	struct list_head *gmem_list = &inode->i_mapping->i_private_list;
-	pgoff_t start = offset >> PAGE_SHIFT;
-	pgoff_t end = (offset + len) >> PAGE_SHIFT;
+	loff_t size = i_size_read(inode);
+	pgoff_t start, end;
 	struct kvm_gmem *gmem;
 
+	if (offset > size)
+		return 0;
+
+	len = min(size - offset, len);
+	start = offset >> PAGE_SHIFT;
+	end = (offset + len) >> PAGE_SHIFT;
+
 	/*
 	 * Bindings must be stable across invalidation to ensure the start+end
 	 * are balanced.