@@ -798,7 +798,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
* kvm mmu, before reclaiming the page, we should
* unmap it from mmu first.
*/
- WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
+ WARN_ON(kvm_is_refcounted_pfn(pfn) && !page_count(pfn_to_page(pfn)));

if (is_accessed_spte(old_spte))
kvm_set_pfn_accessed(pfn);
@@ -3166,7 +3166,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
* PT_PAGE_TABLE_LEVEL and there would be no adjustment done
* here.
*/
- if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
+ if (!is_error_noslot_pfn(pfn) && kvm_is_refcounted_pfn(pfn) &&
level == PT_PAGE_TABLE_LEVEL &&
PageTransCompoundMap(pfn_to_page(pfn)) &&
!mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
@@ -5668,7 +5668,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
* mapping if the indirect sp has level = 1.
*/
if (sp->role.direct &&
- !kvm_is_reserved_pfn(pfn) &&
+ kvm_is_refcounted_pfn(pfn) &&
PageTransCompoundMap(pfn_to_page(pfn))) {
pte_list_remove(rmap_head, sptep);
need_tlb_flush = 1;
@@ -906,7 +906,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

-bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
+bool kvm_is_refcounted_pfn(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -146,7 +146,15 @@ __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
return 0;
}

-bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
+bool kvm_is_refcounted_pfn(kvm_pfn_t pfn)
+{
+ if (pfn_valid(pfn))
+ return !PageReserved(pfn_to_page(pfn));
+
+ return false;
+}
+
+static bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
{
if (pfn_valid(pfn))
return PageReserved(pfn_to_page(pfn));
@@ -1678,7 +1686,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(kvm_pfn_t pfn)
{
- if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
+ if (!is_error_noslot_pfn(pfn) && kvm_is_refcounted_pfn(pfn))
put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1700,12 +1708,8 @@ EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_pfn_dirty(kvm_pfn_t pfn)
{
- if (!kvm_is_reserved_pfn(pfn)) {
- struct page *page = pfn_to_page(pfn);
-
- if (!PageReserved(page))
- SetPageDirty(page);
- }
+ if (!kvm_is_reserved_pfn(pfn))
+ SetPageDirty(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

@@ -1718,7 +1722,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(kvm_pfn_t pfn)
{
- if (!kvm_is_reserved_pfn(pfn))
+ if (kvm_is_refcounted_pfn(pfn))
get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
The function kvm_is_reserved_pfn really has two uses. One is to test
whether we should be updating the reference count on a page when we are
accessing it. The other is to determine whether we should be updating the
dirty flag or marking pages as accessed. In preparation for blurring the
lines between ZONE_DEVICE and system RAM, I am splitting out the
dirty/accessed cases into their own check.

Doing this allows us to add ZONE_DEVICE to the list of refcounted pages
without having to worry about introducing issues with pages being marked
as dirty or accessed, or causing problems with attempted LRU accesses on
the ZONE_DEVICE pages.

Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
---
 arch/x86/kvm/mmu.c       |  6 +++---
 include/linux/kvm_host.h |  2 +-
 virt/kvm/kvm_main.c      | 22 +++++++++++++---------
 3 files changed, 17 insertions(+), 13 deletions(-)
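As a footnote for reviewers, here is a rough sketch of where this split is
headed. Once the refcount test is separated from the reserved test, a
follow-up could treat ZONE_DEVICE pages (which look reserved to
PageReserved() but are refcounted) as refcounted in KVM, while
kvm_is_reserved_pfn() keeps steering the dirty/accessed helpers away from
them. The is_zone_device_page() hookup below is an assumption about such a
follow-up, not something this patch implements:

/*
 * Hypothetical follow-up (illustration only, not part of this patch):
 * whitelist ZONE_DEVICE pages as refcounted. Relies on kvm_pfn_t from
 * <linux/kvm_types.h> and on pfn_valid()/pfn_to_page()/PageReserved()/
 * is_zone_device_page() from the mm headers.
 */
static bool kvm_is_refcounted_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);

		/* ZONE_DEVICE pages are refcounted despite PG_reserved. */
		return !PageReserved(page) || is_zone_device_page(page);
	}

	return false;
}

Because kvm_set_pfn_dirty() and kvm_set_pfn_accessed() still go through
kvm_is_reserved_pfn(), such a change would bump and drop page references
on ZONE_DEVICE pages without ever marking them dirty or accessed, which is
exactly the separation this patch prepares for.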