@@ -39,7 +39,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
}
do {
- ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
+ ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
gpa, PAGE_SIZE, false);
if (ret)
goto out;
@@ -1221,9 +1221,9 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
* @gpc: struct gfn_to_pfn_cache object.
* @vcpu: vCPU to be used for marking pages dirty and to be woken on
* invalidation.
- * @guest_uses_pa: indicates that the resulting host physical PFN is used while
- * @vcpu is IN_GUEST_MODE so invalidations should wake it.
- * @kernel_map: requests a kernel virtual mapping (kmap / memremap).
+ * @usage: indicates if the resulting host physical PFN is used while
+ * the @vcpu is IN_GUEST_MODE and/or if the PFN is used directly
+ * by KVM (and thus needs a kernel virtual mapping).
* @gpa: guest physical address to map.
 * @len: sanity check; the range being accessed must fit within a single page.
* @dirty: mark the cache dirty immediately.
@@ -1239,9 +1239,8 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
* to ensure that the cache is valid before accessing the target page.
*/
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- struct kvm_vcpu *vcpu, bool guest_uses_pa,
- bool kernel_map, gpa_t gpa, unsigned long len,
- bool dirty);
+ struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+ gpa_t gpa, unsigned long len, bool dirty);
/**
* kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
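
For illustration, a minimal sketch of a caller converted to the new signature; the cache field and surrounding code are hypothetical and not part of this patch. A page that the guest dereferences directly and that KVM also reads through gpc->khva would request both usages and pass the vCPU so invalidation can wake it:

	/* Hypothetical per-vCPU cache: the guest maps the page itself and
	 * KVM also accesses it via gpc->khva, so both usage bits are set. */
	ret = kvm_gfn_to_pfn_cache_init(vcpu->kvm, &vcpu->arch.pv_cache, vcpu,
					KVM_GUEST_AND_HOST_USE_PFN,
					gpa, PAGE_SIZE, false);
	if (ret)
		return ret;
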
@@ -18,6 +18,7 @@ struct kvm_memslots;
enum kvm_mr_change;
+#include <linux/bits.h>
#include <linux/types.h>
#include <linux/spinlock_types.h>
@@ -46,6 +47,12 @@ typedef u64 hfn_t;
typedef hfn_t kvm_pfn_t;
+enum pfn_cache_usage {
+ KVM_GUEST_USES_PFN = BIT(0),
+ KVM_HOST_USES_PFN = BIT(1),
+ KVM_GUEST_AND_HOST_USE_PFN = KVM_GUEST_USES_PFN | KVM_HOST_USES_PFN,
+};
+
struct gfn_to_hva_cache {
u64 generation;
gpa_t gpa;
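
To make the mapping from the old booleans explicit: KVM_HOST_USES_PFN plays the role of the former kernel_map flag (KVM itself dereferences the page, so a kernel virtual mapping is maintained), while KVM_GUEST_USES_PFN replaces guest_uses_pa (the guest may use the PFN while IN_GUEST_MODE, so invalidation must wake the vCPU). A pair of hypothetical helpers, not part of the patch, sketching how the bits are interpreted:

	/* Hypothetical helper: KVM reads/writes the page through gpc->khva,
	 * so a kernel mapping (kmap/memremap) must be kept up to date. */
	static inline bool kvm_gpc_host_maps_pfn(struct gfn_to_pfn_cache *gpc)
	{
		return gpc->usage & KVM_HOST_USES_PFN;
	}

	/* Hypothetical helper: the guest may consume the PFN while running,
	 * so MMU-notifier invalidation needs to kick the associated vCPU. */
	static inline bool kvm_gpc_guest_uses_pfn(struct gfn_to_pfn_cache *gpc)
	{
		return gpc->usage & KVM_GUEST_USES_PFN;
	}
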
@@ -64,11 +71,10 @@ struct gfn_to_pfn_cache {
rwlock_t lock;
void *khva;
kvm_pfn_t pfn;
+ enum pfn_cache_usage usage;
bool active;
bool valid;
bool dirty;
- bool kernel_map;
- bool guest_uses_pa;
};
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
@@ -42,7 +42,7 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
* If a guest vCPU could be using the physical address,
* it needs to be woken.
*/
- if (gpc->guest_uses_pa) {
+ if (gpc->usage & KVM_GUEST_USES_PFN) {
if (!wake_vcpus) {
wake_vcpus = true;
bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
@@ -219,7 +219,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
goto map_done;
}
- if (gpc->kernel_map) {
+ if (gpc->usage & KVM_HOST_USES_PFN) {
if (new_pfn == old_pfn) {
new_khva = old_khva;
old_pfn = KVM_PFN_ERR_FAULT;
@@ -299,10 +299,11 @@ EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_unmap);
int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
- struct kvm_vcpu *vcpu, bool guest_uses_pa,
- bool kernel_map, gpa_t gpa, unsigned long len,
- bool dirty)
+ struct kvm_vcpu *vcpu, enum pfn_cache_usage usage,
+ gpa_t gpa, unsigned long len, bool dirty)
{
+ WARN_ON_ONCE(!usage || (usage & KVM_GUEST_AND_HOST_USE_PFN) != usage);
+
if (!gpc->active) {
rwlock_init(&gpc->lock);
@@ -310,8 +311,7 @@ int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->uhva = KVM_HVA_ERR_BAD;
gpc->vcpu = vcpu;
- gpc->kernel_map = kernel_map;
- gpc->guest_uses_pa = guest_uses_pa;
+ gpc->usage = usage;
gpc->valid = false;
gpc->active = true;
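
The WARN_ON_ONCE() added at the top of kvm_gfn_to_pfn_cache_init() rejects a usage of zero as well as any bits outside the enum. A hypothetical restatement of that sanity check, for illustration only:

	/* Hypothetical helper mirroring the WARN_ON_ONCE() condition above:
	 * usage must be a non-empty subset of the defined bits, i.e. one of
	 * KVM_GUEST_USES_PFN, KVM_HOST_USES_PFN or KVM_GUEST_AND_HOST_USE_PFN. */
	static inline bool kvm_gpc_usage_is_valid(enum pfn_cache_usage usage)
	{
		return usage && (usage & KVM_GUEST_AND_HOST_USE_PFN) == usage;
	}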