
[20/54] KVM: x86/mmu: Add struct and helpers to retrieve MMU role bits from regs

Message ID 20210622175739.3610207-21-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: x86/mmu: Bug fixes and summer cleaning

Commit Message

Sean Christopherson June 22, 2021, 5:57 p.m. UTC
Introduce "struct kvm_mmu_role_regs" to hold the register state that is
incorporated into the mmu_role.  For nested TDP, the register state that
is factored into the MMU isn't vCPU state; the dedicated struct will be
used to propagate the correct state throughout the flows without having
to pass multiple params, and also provides helpers for the various flag
accessors.
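
For reference, the new struct (shown in full in the diff below) is just a
snapshot of the three role-relevant registers; the const qualifiers
presumably exist to keep a captured snapshot from being modified after
the fact:

	struct kvm_mmu_role_regs {
		const unsigned long cr0;
		const unsigned long cr4;
		const u64 efer;
	};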

Intentionally make the new helpers cumbersome/ugly by prepending four
underscores.  In the not-too-distant future, it will be preferable to use
the mmu_role to query bits, as the mmu_role can drop irrelevant bits
without creating contradictions, e.g. clearing CR4 bits when CR0.PG=0.
Reserve the clean helper names (no underscores) for the mmu_role.
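
To make the naming scheme concrete, a sketch of the intended split; the
clean-named is_cr0_wp() does not exist in this patch and is shown purely
as an illustration of where the names are headed:

	/* This patch: query the raw register snapshot (four underscores). */
	if (____is_cr0_wp(regs))
		do_something();

	/* Future (hypothetical): query the constructed mmu_role instead. */
	if (is_cr0_wp(mmu))
		do_something();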

Add a helper for vCPU conversion, which is the common case.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 66 +++++++++++++++++++++++++++++++++---------
 1 file changed, 53 insertions(+), 13 deletions(-)

Comments

kernel test robot June 23, 2021, 1:58 a.m. UTC | #1
Hi Sean,

I love your patch! Perhaps something to improve:

[auto build test WARNING on kvm/queue]
[also build test WARNING on next-20210622]
[cannot apply to linus/master vhost/linux-next v5.13-rc7]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Sean-Christopherson/KVM-x86-mmu-Bug-fixes-and-summer-cleaning/20210623-020645
base:   https://git.kernel.org/pub/scm/virt/kvm/kvm.git queue
config: x86_64-randconfig-a002-20210622 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project b3634d3e88b7f26534a5057bff182b7dced584fc)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install x86_64 cross compiling tool for clang build
        # apt-get install binutils-x86-64-linux-gnu
        # https://github.com/0day-ci/linux/commit/01d7a0135a12b1e0e5134d0575e424fd20d1a90f
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Sean-Christopherson/KVM-x86-mmu-Bug-fixes-and-summer-cleaning/20210623-020645
        git checkout 01d7a0135a12b1e0e5134d0575e424fd20d1a90f
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64 

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@intel.com>

All warnings (new ones prefixed by >>):

>> arch/x86/kvm/mmu/mmu.c:209:26: warning: no previous prototype for function 'vcpu_to_role_regs' [-Wmissing-prototypes]
   struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
                            ^
   arch/x86/kvm/mmu/mmu.c:209:1: note: declare 'static' if the function is not intended to be used outside of this translation unit
   struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
   ^
   static 
   arch/x86/kvm/mmu/mmu.c:199:1: warning: unused function '____is_cr0_wp' [-Wunused-function]
   BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
   ^
   arch/x86/kvm/mmu/mmu.c:194:20: note: expanded from macro 'BUILD_MMU_ROLE_REGS_ACCESSOR'
   static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
                      ^
   <scratch space>:58:1: note: expanded from here
   ____is_cr0_wp
   ^
   arch/x86/kvm/mmu/mmu.c:200:1: warning: unused function '____is_cr4_pse' [-Wunused-function]
   BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
   ^
   arch/x86/kvm/mmu/mmu.c:194:20: note: expanded from macro 'BUILD_MMU_ROLE_REGS_ACCESSOR'
   static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
                      ^
   <scratch space>:62:1: note: expanded from here
   ____is_cr4_pse
   ^
   arch/x86/kvm/mmu/mmu.c:202:1: warning: unused function '____is_cr4_smep' [-Wunused-function]
   BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
   ^
   arch/x86/kvm/mmu/mmu.c:194:20: note: expanded from macro 'BUILD_MMU_ROLE_REGS_ACCESSOR'
   static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
                      ^
   <scratch space>:70:1: note: expanded from here
   ____is_cr4_smep
   ^
   arch/x86/kvm/mmu/mmu.c:203:1: warning: unused function '____is_cr4_smap' [-Wunused-function]
   BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
   ^
   arch/x86/kvm/mmu/mmu.c:194:20: note: expanded from macro 'BUILD_MMU_ROLE_REGS_ACCESSOR'
   static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
                      ^
   <scratch space>:74:1: note: expanded from here
   ____is_cr4_smap
   ^
   arch/x86/kvm/mmu/mmu.c:204:1: warning: unused function '____is_cr4_pke' [-Wunused-function]
   BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
   ^
   arch/x86/kvm/mmu/mmu.c:194:20: note: expanded from macro 'BUILD_MMU_ROLE_REGS_ACCESSOR'
   static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
                      ^
   <scratch space>:78:1: note: expanded from here
   ____is_cr4_pke
   ^
   arch/x86/kvm/mmu/mmu.c:205:1: warning: unused function '____is_cr4_la57' [-Wunused-function]
   BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
   ^
   arch/x86/kvm/mmu/mmu.c:194:20: note: expanded from macro 'BUILD_MMU_ROLE_REGS_ACCESSOR'
   static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
                      ^
   <scratch space>:82:1: note: expanded from here
   ____is_cr4_la57
   ^
   arch/x86/kvm/mmu/mmu.c:206:1: warning: unused function '____is_efer_nx' [-Wunused-function]
   BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
   ^
   arch/x86/kvm/mmu/mmu.c:194:20: note: expanded from macro 'BUILD_MMU_ROLE_REGS_ACCESSOR'
   static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
                      ^
   <scratch space>:85:1: note: expanded from here
   ____is_efer_nx
   ^
   8 warnings generated.


vim +/vcpu_to_role_regs +209 arch/x86/kvm/mmu/mmu.c

   208	
 > 209	struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
   210	{
   211		struct kvm_mmu_role_regs regs = {
   212			.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
   213			.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
   214			.efer = vcpu->arch.efer,
   215		};
   216	
   217		return regs;
   218	}
   219	
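
As the robot's inline note suggests, the -Wmissing-prototypes warning is
presumably resolved by making the helper static (the unused-function
warnings for the accessors look like an artifact of this patch adding
accessors whose users only arrive later in the series):

	/* Sketch of the suggested fix: file-local helper, hence static. */
	static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
	{
		struct kvm_mmu_role_regs regs = {
			.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
			.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
			.efer = vcpu->arch.efer,
		};

		return regs;
	}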

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Paolo Bonzini June 23, 2021, 5:18 p.m. UTC | #2
On 22/06/21 19:57, Sean Christopherson wrote:
> +/*
> + * Yes, lots of underscores.  They're a hint that you probably shouldn't be
> + * reading from the role_regs.  Once the mmu_role is constructed, it becomes
> + * the single source of truth for the MMU's state.
> + */
> +#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
> +static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
> +{									\
> +	return !!(regs->reg & flag);					\
> +}

Ok, that's a decent reason to have these accessors in the first place. :)

Paolo

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 5e3ee4aba2ff..3616c3b7618e 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -176,9 +176,46 @@  static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
 
+struct kvm_mmu_role_regs {
+	const unsigned long cr0;
+	const unsigned long cr4;
+	const u64 efer;
+};
+
 #define CREATE_TRACE_POINTS
 #include "mmutrace.h"
 
+/*
+ * Yes, lots of underscores.  They're a hint that you probably shouldn't be
+ * reading from the role_regs.  Once the mmu_role is constructed, it becomes
+ * the single source of truth for the MMU's state.
+ */
+#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
+static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
+{									\
+	return !!(regs->reg & flag);					\
+}
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
+
+struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mmu_role_regs regs = {
+		.cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
+		.cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
+		.efer = vcpu->arch.efer,
+	};
+
+	return regs;
+}
 
 static inline bool kvm_available_flush_tlb_with_range(void)
 {
@@ -4654,14 +4691,14 @@  kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
 }
 
 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
-				    unsigned long cr0, unsigned long cr4,
-				    u64 efer, union kvm_mmu_role new_role)
+				    struct kvm_mmu_role_regs *regs,
+				    union kvm_mmu_role new_role)
 {
-	if (!(cr0 & X86_CR0_PG))
+	if (!____is_cr0_pg(regs))
 		nonpaging_init_context(vcpu, context);
-	else if (efer & EFER_LMA)
+	else if (____is_efer_lma(regs))
 		paging64_init_context(vcpu, context);
-	else if (cr4 & X86_CR4_PAE)
+	else if (____is_cr4_pae(regs))
 		paging32E_init_context(vcpu, context);
 	else
 		paging32_init_context(vcpu, context);
@@ -4672,15 +4709,15 @@  static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
 	reset_shadow_zero_bits_mask(vcpu, context);
 }
 
-static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
-				unsigned long cr4, u64 efer)
+static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
+				struct kvm_mmu_role_regs *regs)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
 	union kvm_mmu_role new_role =
 		kvm_calc_shadow_mmu_root_page_role(vcpu, false);
 
 	if (new_role.as_u64 != context->mmu_role.as_u64)
-		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+		shadow_mmu_init_context(vcpu, context, regs, new_role);
 }
 
 static union kvm_mmu_role
@@ -4699,12 +4736,17 @@  void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
 			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
 {
 	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
+	struct kvm_mmu_role_regs regs = {
+		.cr0 = cr0,
+		.cr4 = cr4,
+		.efer = efer,
+	};
 	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
 
 	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
 
 	if (new_role.as_u64 != context->mmu_role.as_u64)
-		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+		shadow_mmu_init_context(vcpu, context, &regs, new_role);
 
 	/*
 	 * Redo the shadow bits, the reset done by shadow_mmu_init_context()
@@ -4773,11 +4815,9 @@  EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu *context = &vcpu->arch.root_mmu;
+	struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
 
-	kvm_init_shadow_mmu(vcpu,
-			    kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
-			    kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
-			    vcpu->arch.efer);
+	kvm_init_shadow_mmu(vcpu, &regs);
 
 	context->get_guest_pgd     = get_cr3;
 	context->get_pdptr         = kvm_pdptr_read;