
[1/2] KVM: x86/mmu: Avoid multiple hash lookups in kvm_get_mmu_page()

Message ID 20200623194027.23135-2-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: x86/mmu: Optimizations for kvm_get_mmu_page()

Commit Message

Sean Christopherson June 23, 2020, 7:40 p.m. UTC
Refactor for_each_valid_sp() to take the hash list of shadow pages instead
of retrieving it from a gfn, so that the gfn->list hash and lookup are done
only once during kvm_mmu_get_page().

Cc: Peter Feiner <pfeiner@google.com>
Cc: Jon Cargille <jcargill@google.com>
Cc: Jim Mattson <jmattson@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu/mmu.c | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)
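
In effect, the iterator macro no longer re-derives the hash bucket from the
gfn; the caller computes the hlist_head once and reuses it both for the
lookup loop and for the later hlist_add_head(). Below is a minimal,
self-contained userspace sketch of the same pattern; the types, names, and
hash function are toy stand-ins for illustration only, not the KVM code.

	#include <stdio.h>
	#include <stddef.h>

	/* Toy stand-ins for the kernel's hlist machinery; illustration only. */
	struct node {
		unsigned long gfn;
		struct node *next;
	};

	#define NR_BUCKETS 16
	static struct node *hash_table[NR_BUCKETS];

	static size_t hashfn(unsigned long gfn)
	{
		return gfn % NR_BUCKETS;	/* placeholder hash function */
	}

	/* Old shape: the iterator recomputes the bucket from the gfn. */
	#define for_each_node_by_gfn(_n, _gfn)					\
		for ((_n) = hash_table[hashfn(_gfn)]; (_n); (_n) = (_n)->next)

	/* New shape: the iterator walks a bucket the caller already has. */
	#define for_each_node_in_list(_n, _list)				\
		for ((_n) = (_list); (_n); (_n) = (_n)->next)

	/* Look up gfn; if absent, insert @new.  The bucket is hashed once. */
	static struct node *get_or_add(struct node *new, unsigned long gfn)
	{
		struct node **bucket = &hash_table[hashfn(gfn)];
		struct node *n;

		for_each_node_in_list(n, *bucket)	/* reuse the bucket */
			if (n->gfn == gfn)
				return n;

		new->gfn = gfn;
		new->next = *bucket;			/* reuse it again */
		*bucket = new;
		return new;
	}

	int main(void)
	{
		static struct node a, b;

		/* The second call finds the node inserted by the first. */
		printf("%d\n", get_or_add(&a, 42) == &a && get_or_add(&b, 42) == &a);
		return 0;
	}

In the patch itself the same idea appears as the new sp_list local in
kvm_mmu_get_page(): a single kvm_page_table_hashfn(gfn) evaluation feeds
both for_each_valid_sp() and the later hlist_add_head().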

Comments

Jon Cargille June 23, 2020, 8:27 p.m. UTC | #1
LGTM.

Reviewed-By: Jon Cargille <jcargill@google.com>

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3dd0af7e7515..67f8f82e9783 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2258,15 +2258,14 @@  static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
-
-#define for_each_valid_sp(_kvm, _sp, _gfn)				\
-	hlist_for_each_entry(_sp,					\
-	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
+#define for_each_valid_sp(_kvm, _sp, _list)				\
+	hlist_for_each_entry(_sp, _list, hash_link)			\
 		if (is_obsolete_sp((_kvm), (_sp))) {			\
 		} else
 
 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
-	for_each_valid_sp(_kvm, _sp, _gfn)				\
+	for_each_valid_sp(_kvm, _sp,					\
+	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
 		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
 
 static inline bool is_ept_sp(struct kvm_mmu_page *sp)
@@ -2477,6 +2476,7 @@  static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     unsigned int access)
 {
 	union kvm_mmu_page_role role;
+	struct hlist_head *sp_list;
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
 	bool need_sync = false;
@@ -2496,7 +2496,9 @@  static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_valid_sp(vcpu->kvm, sp, gfn) {
+
+	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
+	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
 		if (sp->gfn != gfn) {
 			collisions++;
 			continue;
@@ -2533,8 +2535,7 @@  static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
 	sp->gfn = gfn;
 	sp->role = role;
-	hlist_add_head(&sp->hash_link,
-		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
+	hlist_add_head(&sp->hash_link, sp_list);
 	if (!direct) {
 		/*
 		 * we should do write protection before syncing pages