diff mbox

[2/9] x86/mtrr: drop mtrr_if indirection

Message ID 1471390109-10407-3-git-send-email-cardoe@cardoe.com (mailing list archive)
State New, archived
Headers show

Commit Message

Douglas Goldstein Aug. 16, 2016, 11:28 p.m. UTC
There can only ever be one mtrr_if now and that is the generic
implementation so instead of going through an indirect call change
everything to call the generic implementation directly. The is_cpu()
macro would result in the left side always being equal to
X86_VENDOR_INTEL for the generic implementation due to Intel having a
value of 0. The use_intel() macro was always true in the generic
implementation case as well. Removed some extraneous whitespace at
the same time.

Signed-off-by: Doug Goldstein <cardoe@cardoe.com>
---
 xen/arch/x86/cpu/mtrr/generic.c   |  2 +-
 xen/arch/x86/cpu/mtrr/main.c      | 47 ++++++++++++++-------------------------
 xen/arch/x86/cpu/mtrr/mtrr.h      |  4 ++--
 xen/arch/x86/platform_hypercall.c |  2 +-
 4 files changed, 21 insertions(+), 34 deletions(-)

Comments

Jan Beulich Aug. 17, 2016, 12:49 p.m. UTC | #1
>>> On 17.08.16 at 01:28, <cardoe@cardoe.com> wrote:
> There can only ever be one mtrr_if now and that is the generic
> implementation

This is only true when taking into consideration that cpu_has_mtrr
is #define-d to 1 right now. I'm not sure that's actually a good
assumption (especially when thinking about running Xen itself
virtualized, or possibly adding a mode of operation where no MTRRs
are to be used). But if we want to keep it that way, then I'd suggest
this patch should include removing cpu_has_mtrr (which will then
show to the reviewers that the checks of mtrr_if against NULL
indeed are dead code).

> @@ -569,22 +561,19 @@ struct mtrr_value {
>  void __init mtrr_bp_init(void)
>  {
>  	if (cpu_has_mtrr) {
> -		mtrr_if = &generic_mtrr_ops;
>  		size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
>  		size_and_mask = ~size_or_mask & 0xfffff00000ULL;
>  	}
>  
> -	if (mtrr_if) {
> -		set_num_var_ranges();
> -		init_table();
> -		if (use_intel())
> -			get_mtrr_state();
> -	}
> +    set_num_var_ranges();
> +    init_table();
> +    if (use_intel())
> +        get_mtrr_state();
>  }

Please don't break indentation style.

> --- a/xen/arch/x86/cpu/mtrr/mtrr.h
> +++ b/xen/arch/x86/cpu/mtrr/mtrr.h
> @@ -63,8 +63,8 @@ extern void set_mtrr_ops(const struct mtrr_ops *);
>  extern u64 size_or_mask, size_and_mask;
>  extern const struct mtrr_ops *mtrr_if;
>  
> -#define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
> -#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
> +#define is_cpu(vnd)	(X86_VENDOR_INTEL == X86_VENDOR_##vnd)
> +#define use_intel()	(1)

Is the latter really useful to keep then?

Jan
Douglas Goldstein Aug. 18, 2016, 1:59 a.m. UTC | #2
On 8/17/16 7:49 AM, Jan Beulich wrote:
>>>> On 17.08.16 at 01:28, <cardoe@cardoe.com> wrote:
>> There can only ever be one mtrr_if now and that is the generic
>> implementation
> 
> This is only true when taking into consideration that cpu_has_mtrr
> is #define-d to 1 right now. I'm not sure that's actually a good
> assumption (especially when think about running Xen itself
> virtualized, or possibly adding a mode of operation where no MTRRs
> are to be used). But if we want to keep it that way, then I'd suggest
> this patch should include removing cpu_has_mtrr (which will then
> show to the reviewers that the checks of mtrr_if against NULL
> indeed are dead code.

Sure, I can remove cpu_has_mtrr; that would certainly make it cleaner. Is
it ok with you and Andrew to make the assumption that we'll always have
MTRRs (until the day we don't, like you described)?

> 
>> @@ -569,22 +561,19 @@ struct mtrr_value {
>>  void __init mtrr_bp_init(void)
>>  {
>>  	if (cpu_has_mtrr) {
>> -		mtrr_if = &generic_mtrr_ops;
>>  		size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
>>  		size_and_mask = ~size_or_mask & 0xfffff00000ULL;
>>  	}
>>  
>> -	if (mtrr_if) {
>> -		set_num_var_ranges();
>> -		init_table();
>> -		if (use_intel())
>> -			get_mtrr_state();
>> -	}
>> +    set_num_var_ranges();
>> +    init_table();
>> +    if (use_intel())
>> +        get_mtrr_state();
>>  }
> 
> Please don't break indentation style.

Sad panda. This file has tabs. Sorry I missed that.

> 
>> --- a/xen/arch/x86/cpu/mtrr/mtrr.h
>> +++ b/xen/arch/x86/cpu/mtrr/mtrr.h
>> @@ -63,8 +63,8 @@ extern void set_mtrr_ops(const struct mtrr_ops *);
>>  extern u64 size_or_mask, size_and_mask;
>>  extern const struct mtrr_ops *mtrr_if;
>>  
>> -#define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
>> -#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
>> +#define is_cpu(vnd)	(X86_VENDOR_INTEL == X86_VENDOR_##vnd)
>> +#define use_intel()	(1)
> 
> Is the latter really useful to keep then?

Would you like me to squash patch 4 into this or make a note in the
commit message that further clean ups are coming?

--
Doug Goldstein
Jan Beulich Aug. 18, 2016, 9:37 a.m. UTC | #3
>>> On 18.08.16 at 03:59, <cardoe@cardoe.com> wrote:
> On 8/17/16 7:49 AM, Jan Beulich wrote:
>>>>> On 17.08.16 at 01:28, <cardoe@cardoe.com> wrote:
>>> There can only ever be one mtrr_if now and that is the generic
>>> implementation
>> 
>> This is only true when taking into consideration that cpu_has_mtrr
>> is #define-d to 1 right now. I'm not sure that's actually a good
>> assumption (especially when think about running Xen itself
>> virtualized, or possibly adding a mode of operation where no MTRRs
>> are to be used). But if we want to keep it that way, then I'd suggest
>> this patch should include removing cpu_has_mtrr (which will then
>> show to the reviewers that the checks of mtrr_if against NULL
>> indeed are dead code.
> 
> Sure I can remove cpu_has_mtrr that would certainly make it cleaner. Is
> it ok with you and Andrew to make the assumption that we'll always have
> MTRRs (until the day we don't like you described)?

Well, that's what I've basically put up for discussion. My personal
preference would be to drop the hard coding of cpu_has_mtrr (and
hence keep it as well as mtrr_if).

>>> --- a/xen/arch/x86/cpu/mtrr/mtrr.h
>>> +++ b/xen/arch/x86/cpu/mtrr/mtrr.h
>>> @@ -63,8 +63,8 @@ extern void set_mtrr_ops(const struct mtrr_ops *);
>>>  extern u64 size_or_mask, size_and_mask;
>>>  extern const struct mtrr_ops *mtrr_if;
>>>  
>>> -#define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
>>> -#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
>>> +#define is_cpu(vnd)	(X86_VENDOR_INTEL == X86_VENDOR_##vnd)
>>> +#define use_intel()	(1)
>> 
>> Is the latter really useful to keep then?
> 
> Would you like me to squash patch 4 into this or make a note in the
> commit message that further clean ups are coming?

Either way is fine with me; all I wish is for it to be clear that you
don't stop halfway with the cleanup (which I'm glad you took a
stab at, btw).

Jan
Andrew Cooper Aug. 18, 2016, 9:40 a.m. UTC | #4
On 18/08/16 02:59, Doug Goldstein wrote:
> On 8/17/16 7:49 AM, Jan Beulich wrote:
>>>>> On 17.08.16 at 01:28, <cardoe@cardoe.com> wrote:
>>> There can only ever be one mtrr_if now and that is the generic
>>> implementation
>> This is only true when taking into consideration that cpu_has_mtrr
>> is #define-d to 1 right now. I'm not sure that's actually a good
>> assumption (especially when think about running Xen itself
>> virtualized, or possibly adding a mode of operation where no MTRRs
>> are to be used). But if we want to keep it that way, then I'd suggest
>> this patch should include removing cpu_has_mtrr (which will then
>> show to the reviewers that the checks of mtrr_if against NULL
>> indeed are dead code.
> Sure I can remove cpu_has_mtrr that would certainly make it cleaner. Is
> it ok with you and Andrew to make the assumption that we'll always have
> MTRRs (until the day we don't like you described)?

Please don't remove cpu_has_mtrr.  I plan to make better use of it with
the future plan to move PV guests into a PVH container.

In such a case, the PVH container won't want/need MTRRs, and we will get
better performance by not having them available.

~Andrew
diff mbox

Patch

diff --git a/xen/arch/x86/cpu/mtrr/generic.c b/xen/arch/x86/cpu/mtrr/generic.c
index 224d231..5c4b273 100644
--- a/xen/arch/x86/cpu/mtrr/generic.c
+++ b/xen/arch/x86/cpu/mtrr/generic.c
@@ -265,7 +265,7 @@  int mtrr_generic_get_free_region(unsigned long base, unsigned long size, int rep
 	if (replace_reg >= 0 && replace_reg < max)
 		return replace_reg;
 	for (i = 0; i < max; ++i) {
-		mtrr_if->get(i, &lbase, &lsize, &ltype);
+		mtrr_generic_get(i, &lbase, &lsize, &ltype);
 		if (lsize == 0)
 			return i;
 	}
diff --git a/xen/arch/x86/cpu/mtrr/main.c b/xen/arch/x86/cpu/mtrr/main.c
index bf489e3..ff908ad 100644
--- a/xen/arch/x86/cpu/mtrr/main.c
+++ b/xen/arch/x86/cpu/mtrr/main.c
@@ -58,8 +58,6 @@  static DEFINE_MUTEX(mtrr_mutex);
 u64 __read_mostly size_or_mask;
 u64 __read_mostly size_and_mask;
 
-const struct mtrr_ops *__read_mostly mtrr_if = NULL;
-
 static void set_mtrr(unsigned int reg, unsigned long base,
 		     unsigned long size, mtrr_type type);
 
@@ -82,7 +80,7 @@  static const char *mtrr_attrib_to_str(int x)
 /*  Returns non-zero if we have the write-combining memory type  */
 static int have_wrcomb(void)
 {
-	return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0);
+	return mtrr_generic_have_wrcomb();
 }
 
 /*  This function returns the number of variable MTRRs  */
@@ -150,9 +148,9 @@  static void ipi_handler(void *info)
 	if (data->smp_reg == ~0U) /* update all mtrr registers */
 		/* At the cpu hot-add time this will reinitialize mtrr 
  		 * registres on the existing cpus. It is ok.  */
-		mtrr_if->set_all();
+		mtrr_generic_set_all();
 	else /* single mtrr register update */
-		mtrr_if->set(data->smp_reg, data->smp_base, 
+		mtrr_generic_set(data->smp_reg, data->smp_base,
 			     data->smp_size, data->smp_type);
 
 	atomic_dec(&data->count);
@@ -200,7 +198,7 @@  static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
  * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
  * Meanwhile, they are waiting for that flag to be set. Once it's set, each 
  * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it 
- * differently, so we call mtrr_if->set() callback and let them take care of it.
+ * differently, so we call mtrr_generic_set() callback and let them take care of it.
  * When they're done, they again decrement data->count and wait for data.gate to 
  * be reset. 
  * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
@@ -252,9 +250,9 @@  static void set_mtrr(unsigned int reg, unsigned long base,
 	if (reg == ~0U)  /* update all mtrr registers */
 		/* at boot or resume time, this will reinitialize the mtrrs on 
 		 * the bp. It is ok. */
-		mtrr_if->set_all();
+		mtrr_generic_set_all();
 	else /* update the single mtrr register */
-		mtrr_if->set(reg,base,size,type);
+		mtrr_generic_set(reg, base, size, type);
 
 	/* wait for the others */
 	while (atomic_read(&data.count))
@@ -317,10 +315,7 @@  int mtrr_add_page(unsigned long base, unsigned long size,
 	mtrr_type ltype;
 	unsigned long lbase, lsize;
 
-	if (!mtrr_if)
-		return -ENXIO;
-		
-	if ((error = mtrr_if->validate_add_page(base,size,type)))
+	if ((error = mtrr_generic_validate_add_page(base,size,type)))
 		return error;
 
 	if (type >= MTRR_NUM_TYPES) {
@@ -351,7 +346,7 @@  int mtrr_add_page(unsigned long base, unsigned long size,
 	/*  Search for existing MTRR  */
 	mutex_lock(&mtrr_mutex);
 	for (i = 0; i < num_var_ranges; ++i) {
-		mtrr_if->get(i, &lbase, &lsize, &ltype);
+		mtrr_generic_get(i, &lbase, &lsize, &ltype);
 		if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
 			continue;
 		/*  At this point we know there is some kind of overlap/enclosure  */
@@ -386,7 +381,7 @@  int mtrr_add_page(unsigned long base, unsigned long size,
 		goto out;
 	}
 	/*  Search for an empty MTRR  */
-	i = mtrr_if->get_free_region(base, size, replace);
+	i = mtrr_generic_get_free_region(base, size, replace);
 	if (i >= 0) {
 		set_mtrr(i, base, size, type);
 		if (likely(replace < 0))
@@ -487,15 +482,12 @@  int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 	unsigned long lbase, lsize;
 	int error = -EINVAL;
 
-	if (!mtrr_if)
-		return -ENXIO;
-
 	max = num_var_ranges;
 	mutex_lock(&mtrr_mutex);
 	if (reg < 0) {
 		/*  Search for existing MTRR  */
 		for (i = 0; i < max; ++i) {
-			mtrr_if->get(i, &lbase, &lsize, &ltype);
+			mtrr_generic_get(i, &lbase, &lsize, &ltype);
 			if (lbase == base && lsize == size) {
 				reg = i;
 				break;
@@ -511,7 +503,7 @@  int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 		printk(KERN_WARNING "mtrr: register: %d too big\n", reg);
 		goto out;
 	}
-	mtrr_if->get(reg, &lbase, &lsize, &ltype);
+	mtrr_generic_get(reg, &lbase, &lsize, &ltype);
 	if (lsize < 1) {
 		printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg);
 		goto out;
@@ -569,22 +561,19 @@  struct mtrr_value {
 void __init mtrr_bp_init(void)
 {
 	if (cpu_has_mtrr) {
-		mtrr_if = &generic_mtrr_ops;
 		size_or_mask = ~((1ULL << (paddr_bits - PAGE_SHIFT)) - 1);
 		size_and_mask = ~size_or_mask & 0xfffff00000ULL;
 	}
 
-	if (mtrr_if) {
-		set_num_var_ranges();
-		init_table();
-		if (use_intel())
-			get_mtrr_state();
-	}
+    set_num_var_ranges();
+    init_table();
+    if (use_intel())
+        get_mtrr_state();
 }
 
 void mtrr_ap_init(void)
 {
-	if (!mtrr_if || !use_intel() || hold_mtrr_updates_on_aps)
+	if (!use_intel() || hold_mtrr_updates_on_aps)
 		return;
 	/*
 	 * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed,
@@ -630,13 +619,11 @@  void mtrr_bp_restore(void)
 {
 	if (!use_intel())
 		return;
-	mtrr_if->set_all();
+	mtrr_generic_set_all();
 }
 
 static int __init mtrr_init_finialize(void)
 {
-	if (!mtrr_if)
-		return 0;
 	if (use_intel())
 		mtrr_state_warn();
 	return 0;
diff --git a/xen/arch/x86/cpu/mtrr/mtrr.h b/xen/arch/x86/cpu/mtrr/mtrr.h
index 619575f..92b0b11 100644
--- a/xen/arch/x86/cpu/mtrr/mtrr.h
+++ b/xen/arch/x86/cpu/mtrr/mtrr.h
@@ -63,8 +63,8 @@  extern void set_mtrr_ops(const struct mtrr_ops *);
 extern u64 size_or_mask, size_and_mask;
 extern const struct mtrr_ops *mtrr_if;
 
-#define is_cpu(vnd)	(mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
-#define use_intel()	(mtrr_if && mtrr_if->use_intel_if == 1)
+#define is_cpu(vnd)	(X86_VENDOR_INTEL == X86_VENDOR_##vnd)
+#define use_intel()	(1)
 
 extern unsigned int num_var_ranges;
 
diff --git a/xen/arch/x86/platform_hypercall.c b/xen/arch/x86/platform_hypercall.c
index 780f22d..4a294b4 100644
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -265,7 +265,7 @@  ret_t do_platform_op(XEN_GUEST_HANDLE_PARAM(xen_platform_op_t) u_xenpf_op)
         ret = -EINVAL;
         if ( op->u.read_memtype.reg < num_var_ranges )
         {
-            mtrr_if->get(op->u.read_memtype.reg, &mfn, &nr_mfns, &type);
+            mtrr_generic_get(op->u.read_memtype.reg, &mfn, &nr_mfns, &type);
             op->u.read_memtype.mfn     = mfn;
             op->u.read_memtype.nr_mfns = nr_mfns;
             op->u.read_memtype.type    = type;