@@ -688,6 +688,9 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, int offset, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
+unsigned long kvm_xchg_guest_cached(struct kvm *kvm,
+ struct gfn_to_hva_cache *ghc, unsigned long offset,
+ unsigned long new, int size);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
@@ -2010,6 +2010,48 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+/*
+ * kvm_xchg_guest_cached - atomically exchange a value with guest memory.
+ * @kvm:    the VM
+ * @ghc:    initialized gfn_to_hva_cache describing the guest region
+ * @offset: byte offset into the cached region
+ * @new:    value to store
+ * @size:   operand width in bytes (1, 2, 4 or 8)
+ *
+ * Returns the previous guest value, or -EFAULT on failure.  Since the
+ * return type is unsigned long, callers that must distinguish errors
+ * cannot exchange guest values that collide with -EFAULT.
+ *
+ * NOTE(review): unlike kvm_write_guest_cached(), this does not
+ * revalidate @ghc against the current memslot generation -- confirm
+ * callers cannot race with memslot updates before relying on
+ * ghc->memslot below.
+ */
+unsigned long kvm_xchg_guest_cached(struct kvm *kvm,
+		struct gfn_to_hva_cache *ghc, unsigned long offset,
+		unsigned long new, int size)
+{
+	unsigned long r;
+	void *kva;
+	struct page *page;
+	kvm_pfn_t pfn;
+
+	/*
+	 * The whole operand must lie inside the cached region and inside
+	 * the single page mapped below (an xchg cannot span two
+	 * mappings).  Bail out instead of writing past either boundary.
+	 */
+	if (WARN_ON(offset + size > ghc->len ||
+		    offset_in_page(ghc->gpa) + offset + size > PAGE_SIZE))
+		return -EFAULT;
+
+	pfn = gfn_to_pfn_atomic(kvm, ghc->gpa >> PAGE_SHIFT);
+	page = kvm_pfn_to_page(pfn);
+
+	/* Error pages carry no reference; nothing to release here. */
+	if (is_error_page(page))
+		return -EFAULT;
+
+	kva = kmap_atomic(page) + offset_in_page(ghc->gpa) + offset;
+	switch (size) {
+	case 1:
+		r = xchg((char *)kva, new);
+		break;
+	case 2:
+		r = xchg((short *)kva, new);
+		break;
+	case 4:
+		r = xchg((int *)kva, new);
+		break;
+	case 8:
+		r = xchg((long *)kva, new);
+		break;
+	default:
+		/* Unsupported width: nothing was written to the guest. */
+		kunmap_atomic(kva);
+		kvm_release_pfn_clean(pfn);
+		return -EFAULT;
+	}
+
+	kunmap_atomic(kva);
+	mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+	/* Drop the reference taken by gfn_to_pfn_atomic(). */
+	kvm_release_pfn_dirty(pfn);
+
+	return r;
+}
+
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));