Message ID: 33812f5282bc42e0e8e6eaaa2a6a63ce4d258bfc.1705965635.git.isaku.yamahata@intel.com
State: New, archived
Series: KVM TDX basic feature support
On 1/23/2024 7:53 AM, isaku.yamahata@intel.com wrote:
> From: Isaku Yamahata <isaku.yamahata@intel.com>
>
> Because TDX support introduces private mapping, add a new member in union
> kvm_mmu_page_role with access functions to check the member.
>
> Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
> ---
>  arch/x86/include/asm/kvm_host.h | 27 +++++++++++++++++++++++++++
>  arch/x86/kvm/mmu/mmu_internal.h |  5 +++++
>  arch/x86/kvm/mmu/spte.h         |  6 ++++++
>  3 files changed, 38 insertions(+)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 313519edd79e..0cdbbc21136b 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -349,7 +349,12 @@ union kvm_mmu_page_role {
>  		unsigned ad_disabled:1;
>  		unsigned guest_mode:1;
>  		unsigned passthrough:1;
> +#ifdef CONFIG_KVM_MMU_PRIVATE
> +		unsigned is_private:1;
> +		unsigned :4;
> +#else
>  		unsigned :5;
> +#endif
>
>  		/*
>  		 * This is left at the top of the word so that
> @@ -361,6 +366,28 @@ union kvm_mmu_page_role {
>  	};
>  };
>
> +#ifdef CONFIG_KVM_MMU_PRIVATE
> +static inline bool kvm_mmu_page_role_is_private(union kvm_mmu_page_role role)
> +{
> +	return !!role.is_private;
> +}
> +
> +static inline void kvm_mmu_page_role_set_private(union kvm_mmu_page_role *role)
> +{
> +	role->is_private = 1;
> +}
> +#else
> +static inline bool kvm_mmu_page_role_is_private(union kvm_mmu_page_role role)
> +{
> +	return false;
> +}
> +
> +static inline void kvm_mmu_page_role_set_private(union kvm_mmu_page_role *role)
> +{
> +	WARN_ON_ONCE(1);
> +}
> +#endif
> +
>  /*
>   * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
>   * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
> diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
> index 2b9377442927..97af4e39ce6f 100644
> --- a/arch/x86/kvm/mmu/mmu_internal.h
> +++ b/arch/x86/kvm/mmu/mmu_internal.h
> @@ -145,6 +145,11 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
>  	return kvm_mmu_role_as_id(sp->role);
>  }
>
> +static inline bool is_private_sp(const struct kvm_mmu_page *sp)
> +{
> +	return kvm_mmu_page_role_is_private(sp->role);
> +}
> +
>  static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
>  {
>  	/*
> diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
> index 1a163aee9ec6..88db32cba0fd 100644
> --- a/arch/x86/kvm/mmu/spte.h
> +++ b/arch/x86/kvm/mmu/spte.h
> @@ -264,6 +264,12 @@ static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
>  	return spte_to_child_sp(root);
>  }
>
> +static inline bool is_private_sptep(u64 *sptep)
> +{
> +	WARN_ON_ONCE(!sptep);

If sptep is NULL, we should return here; otherwise, the following code will
dereference an invalid pointer.

> +	return is_private_sp(sptep_to_sp(sptep));
> +}
> +
>  static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
>  {
>  	return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
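A minimal sketch of the guard being suggested, relying on the existing kernel
idiom that WARN_ON_ONCE() evaluates to the condition it tested:

static inline bool is_private_sptep(u64 *sptep)
{
	/* Warn once and bail out instead of dereferencing a NULL sptep below. */
	if (WARN_ON_ONCE(!sptep))
		return false;
	return is_private_sp(sptep_to_sp(sptep));
}

Returning false on a NULL sptep errs toward treating the entry as shared
rather than private; a caller that can distinguish the error case might want
a different fallback, but this keeps the helper's bool signature intact.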
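For readers less familiar with the bit-field bookkeeping in the kvm_host.h
hunk, here is a standalone mock (plain C, not the kernel union; names like
mock_page_role are invented, and bit positions assume the usual low-to-high
bit-field allocation of GCC/Clang on little-endian x86) of why the anonymous
pad shrinks from :5 to :4 when is_private:1 is added: the total bit count,
and therefore the offset of every field above the pad, stays the same, and
the union still fits in one 32-bit word.

#include <stdint.h>
#include <stdio.h>

union mock_page_role {
	uint32_t word;
	struct {
		unsigned level:4;	/* stand-in for the low flag bits */
		unsigned passthrough:1;
		unsigned is_private:1;	/* the new bit */
		unsigned :4;		/* pad shrunk from :5 to :4 */
		unsigned smm:8;		/* stays at a fixed offset */
	};
};

/* The whole point of shrinking the pad: the role is still one u32. */
_Static_assert(sizeof(union mock_page_role) == sizeof(uint32_t),
	       "role must remain a single 32-bit word");

int main(void)
{
	union mock_page_role role = { .word = 0 };

	role.is_private = 1;
	/* is_private lands at bit 5 here: 4 (level) + 1 (passthrough). */
	printf("word = 0x%x, is_private = %d\n",
	       (unsigned)role.word, (int)role.is_private);
	return 0;
}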