
[11/23] KVM: x86/mmu: Pass const memslot to kvm_mmu_init_sp() and descendants

Message ID 20220203010051.2813563-12-dmatlack@google.com (mailing list archive)
State New, archived
Series Extend Eager Page Splitting to the shadow MMU

Commit Message

David Matlack Feb. 3, 2022, 1 a.m. UTC
Pass the memslot to kvm_mmu_init_sp() and its descendants as a const
pointer so that kvm_mmu_init_sp() can be called from contexts that only
hold a const pointer to the memslot.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
---
 arch/x86/include/asm/kvm_page_track.h | 2 +-
 arch/x86/kvm/mmu/mmu.c                | 7 +++----
 arch/x86/kvm/mmu/mmu_internal.h       | 2 +-
 arch/x86/kvm/mmu/page_track.c         | 4 ++--
 arch/x86/kvm/mmu/tdp_mmu.c            | 2 +-
 arch/x86/kvm/mmu/tdp_mmu.h            | 2 +-
 6 files changed, 9 insertions(+), 10 deletions(-)
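
For illustration, a minimal standalone sketch (hypothetical names and
types, not code from this patch) of why the const qualifier has to
propagate through the whole call chain: a function can only take a const
pointer if every function it forwards that pointer to is const-qualified
as well.

struct memslot { unsigned long base_gfn; };

/* The callee must accept a const pointer... */
static void track_gfn(const struct memslot *slot, unsigned long gfn)
{
	/* Read-only access through a const pointer is fine. */
	(void)(gfn - slot->base_gfn);
}

/* ...so that a caller holding only a const pointer can pass it through. */
static void init_sp(const struct memslot *slot, unsigned long gfn)
{
	/*
	 * If track_gfn() took a non-const slot, this call would need a
	 * cast, or would warn under gcc's -Wdiscarded-qualifiers.
	 */
	track_gfn(slot, gfn);
}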

Comments

Ben Gardon Feb. 23, 2022, 11:27 p.m. UTC | #1
On Wed, Feb 2, 2022 at 5:02 PM David Matlack <dmatlack@google.com> wrote:
>
> Pass the memslot to kvm_mmu_init_sp() and its descendants as a const
> pointer so that kvm_mmu_init_sp() can be called from contexts that only
> hold a const pointer to the memslot.
>
> No functional change intended.
>

Reviewed-by: Ben Gardon <bgardon@google.com>


Patch

diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index eb186bc57f6a..3a2dc183ae9a 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -58,7 +58,7 @@  int kvm_page_track_create_memslot(struct kvm *kvm,
 				  unsigned long npages);
 
 void kvm_slot_page_track_add_page(struct kvm *kvm,
-				  struct kvm_memory_slot *slot, gfn_t gfn,
+				  const struct kvm_memory_slot *slot, gfn_t gfn,
 				  enum kvm_page_track_mode mode);
 void kvm_slot_page_track_remove_page(struct kvm *kvm,
 				     struct kvm_memory_slot *slot, gfn_t gfn,
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a5e3bb632542..de7c47ee0def 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -805,7 +805,7 @@  void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
 }
 
 static void account_shadowed(struct kvm *kvm,
-			     struct kvm_memory_slot *slot,
+			     const struct kvm_memory_slot *slot,
 			     struct kvm_mmu_page *sp)
 {
 	gfn_t gfn;
@@ -1384,7 +1384,7 @@  int kvm_cpu_dirty_log_size(void)
 }
 
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-				    struct kvm_memory_slot *slot, u64 gfn,
+				    const struct kvm_memory_slot *slot, u64 gfn,
 				    int min_level)
 {
 	struct kvm_rmap_head *rmap_head;
@@ -2158,9 +2158,8 @@  static struct kvm_mmu_page *kvm_mmu_get_existing_sp(struct kvm_vcpu *vcpu,
 	return sp;
 }
 
-
 static void kvm_mmu_init_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
-			    struct kvm_memory_slot *slot, gfn_t gfn,
+			    const struct kvm_memory_slot *slot, gfn_t gfn,
 			    union kvm_mmu_page_role role)
 {
 	struct hlist_head *sp_list;
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index c5f2c0b9177d..e6bcea5a0aa9 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -123,7 +123,7 @@  int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-				    struct kvm_memory_slot *slot, u64 gfn,
+				    const struct kvm_memory_slot *slot, u64 gfn,
 				    int min_level);
 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 					u64 start_gfn, u64 pages);
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 68eb1fb548b6..ebd704946a35 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -83,7 +83,7 @@  int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot)
 	return 0;
 }
 
-static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
+static void update_gfn_track(const struct kvm_memory_slot *slot, gfn_t gfn,
 			     enum kvm_page_track_mode mode, short count)
 {
 	int index, val;
@@ -111,7 +111,7 @@  static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
  * @mode: tracking mode, currently only write track is supported.
  */
 void kvm_slot_page_track_add_page(struct kvm *kvm,
-				  struct kvm_memory_slot *slot, gfn_t gfn,
+				  const struct kvm_memory_slot *slot, gfn_t gfn,
 				  enum kvm_page_track_mode mode)
 {
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 4ff1af24b5aa..34c451f1eac9 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1645,7 +1645,7 @@  static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
  * Returns true if an SPTE was set and a TLB flush is needed.
  */
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-				   struct kvm_memory_slot *slot, gfn_t gfn,
+				   const struct kvm_memory_slot *slot, gfn_t gfn,
 				   int min_level)
 {
 	struct kvm_mmu_page *root;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 3f987785702a..b1265149a05d 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -64,7 +64,7 @@  void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				       const struct kvm_memory_slot *slot);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-				   struct kvm_memory_slot *slot, gfn_t gfn,
+				   const struct kvm_memory_slot *slot, gfn_t gfn,
 				   int min_level);
 
 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
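
As a final illustrative sketch (again with hypothetical, simplified names,
not code from this series), the payoff is that a caller such as an
eager-page-splitting path that only holds const memslot pointers can reach
these helpers without casting away the qualifier:

struct memslot { unsigned long base_gfn; };

/* Constified helper, matching the pattern of the patch above. */
static void init_sp(const struct memslot *slot, unsigned long gfn)
{
	(void)slot;
	(void)gfn;
}

/* Hypothetical splitting path that only ever holds const slot pointers. */
static void split_pages_in_slots(const struct memslot *slots, int nr_slots)
{
	for (int i = 0; i < nr_slots; i++) {
		const struct memslot *slot = &slots[i];

		/* With const in the prototype, no cast is needed here. */
		init_sp(slot, slot->base_gfn);
	}
}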