KVM: Consider SMT idle status when halt polling

Message ID 20210722035807.36937-1-lirongqing@baidu.com (mailing list archive)
State New, archived
Headers show
Series KVM: Consider SMT idle status when halt polling

Commit Message

Li,Rongqing July 22, 2021, 3:58 a.m. UTC
SMT siblings share caches and other hardware resources; halt polling
will degrade a sibling's performance if that sibling is busy

Signed-off-by: Li RongQing <lirongqing@baidu.com>
---
 include/linux/kvm_host.h |  5 ++++-
 include/linux/sched.h    | 17 +++++++++++++++++
 kernel/sched/fair.c      | 17 -----------------
 3 files changed, 21 insertions(+), 18 deletions(-)

Comments

Mika Penttilä July 22, 2021, 4:15 a.m. UTC | #1
On 22.7.2021 6.58, Li RongQing wrote:
> SMT siblings share caches and other hardware resources; halt polling
> will degrade a sibling's performance if that sibling is busy
>
> Signed-off-by: Li RongQing <lirongqing@baidu.com>
> ---
>   include/linux/kvm_host.h |  5 ++++-
>   include/linux/sched.h    | 17 +++++++++++++++++
>   kernel/sched/fair.c      | 17 -----------------
>   3 files changed, 21 insertions(+), 18 deletions(-)
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index ae7735b..15b3ef4 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -269,7 +269,10 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
>   
>   static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
>   {
> -	return single_task_running() && !need_resched() && ktime_before(cur, stop);
> +	return single_task_running() &&
> +		   !need_resched() &&
> +		   ktime_before(cur, stop) &&
> +		   is_core_idle(raw_smp_processor_id());
>   }
>   
>   /*
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index ec8d07d..c333218 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -34,6 +34,7 @@
>   #include <linux/rseq.h>
>   #include <linux/seqlock.h>
>   #include <linux/kcsan.h>
> +#include <linux/topology.h>
>   #include <asm/kmap_size.h>
>   
>   /* task_struct member predeclarations (sorted alphabetically): */
> @@ -2191,6 +2192,22 @@ int sched_trace_rq_nr_running(struct rq *rq);
>   
>   const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
>   
> +static inline bool is_core_idle(int cpu)
> +{
> +#ifdef CONFIG_SCHED_SMT
> +	int sibling;
> +
> +	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
> +		if (cpu == sibling)
> +			continue;
> +
> +		if (!idle_cpu(cpu))
> +			return false;

This should be if (!idle_cpu(sibling)) instead; as written it always returns false (see the corrected sketch below).



> +	}
> +#endif
> +	return true;
> +}
> +
>   #ifdef CONFIG_SCHED_CORE
>   extern void sched_core_free(struct task_struct *tsk);
>   extern void sched_core_fork(struct task_struct *p);
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 44c4520..5b0259c 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1477,23 +1477,6 @@ struct numa_stats {
>   	int idle_cpu;
>   };
>   
> -static inline bool is_core_idle(int cpu)
> -{
> -#ifdef CONFIG_SCHED_SMT
> -	int sibling;
> -
> -	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
> -		if (cpu == sibling)
> -			continue;
> -
> -		if (!idle_cpu(cpu))
> -			return false;
> -	}
> -#endif
> -
> -	return true;
> -}
> -
>   struct task_numa_env {
>   	struct task_struct *p;
>
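For reference, the corrected helper Mika points out would test the sibling rather than the polling CPU itself. A minimal sketch, not part of the posted patch, only illustrating the suggested fix:

static inline bool is_core_idle(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	int sibling;

	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
		if (cpu == sibling)
			continue;

		/* Check the sibling, not the CPU doing the halt polling. */
		if (!idle_cpu(sibling))
			return false;
	}
#endif
	return true;
}

With the original idle_cpu(cpu) test the polling CPU is never idle while it is halt polling, so the helper returns false whenever CONFIG_SCHED_SMT is enabled and the CPU has at least one sibling.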
Li,Rongqing July 22, 2021, 5:22 a.m. UTC | #2
> > diff --git a/include/linux/sched.h b/include/linux/sched.h index
> > ec8d07d..c333218 100644
> > --- a/include/linux/sched.h
> > +++ b/include/linux/sched.h
> > @@ -34,6 +34,7 @@
> >   #include <linux/rseq.h>
> >   #include <linux/seqlock.h>
> >   #include <linux/kcsan.h>
> > +#include <linux/topology.h>
> >   #include <asm/kmap_size.h>
> >
> >   /* task_struct member predeclarations (sorted alphabetically): */ @@
> > -2191,6 +2192,22 @@ int sched_trace_rq_nr_running(struct rq *rq);
> >
> >   const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
> >
> > +static inline bool is_core_idle(int cpu) { #ifdef CONFIG_SCHED_SMT
> > +	int sibling;
> > +
> > +	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
> > +		if (cpu == sibling)
> > +			continue;
> > +
> > +		if (!idle_cpu(cpu))
> > +			return false;
> 
> This should be if (!idle_cpu(sibling)) instead; as written it always returns false.
> 

Good catch; this is a long-standing bug.

Would you like to submit the fix yourself, or should I submit it on your behalf?

Thanks

-Li
Wanpeng Li July 22, 2021, 5:55 a.m. UTC | #3
On Thu, 22 Jul 2021 at 12:07, Li RongQing <lirongqing@baidu.com> wrote:
>
> SMT siblings share caches and other hardware resources; halt polling
> will degrade a sibling's performance if that sibling is busy

Do you see any real-scenario benefit? Given the polling nature, some
cloud providers configure halt polling for their preferred balance of CPU usage
and performance, and other cloud providers, whose NFV scenarios are more
sensitive to latency, pin vCPUs and pCPUs 1:1; you would destroy these setups.

    Wanpeng
Li,Rongqing July 22, 2021, 7:10 a.m. UTC | #4
> -----Original Message-----
> From: Wanpeng Li <kernellwp@gmail.com>
> Sent: July 22, 2021 13:55
> To: Li,Rongqing <lirongqing@baidu.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>; Ingo Molnar
> <mingo@redhat.com>; Peter Zijlstra <peterz@infradead.org>; kvm
> <kvm@vger.kernel.org>; LKML <linux-kernel@vger.kernel.org>
> Subject: Re: [PATCH] KVM: Consider SMT idle status when halt polling
> 
> On Thu, 22 Jul 2021 at 12:07, Li RongQing <lirongqing@baidu.com> wrote:
> >
> > SMT siblings share caches and other hardware resources; halt polling will
> > degrade a sibling's performance if that sibling is busy
> 
> Do you see any real-scenario benefit? Given the polling nature, some cloud
> providers configure halt polling for their preferred balance of CPU usage and
> performance, and other cloud providers, whose NFV scenarios are more sensitive
> to latency, pin vCPUs and pCPUs 1:1; you would destroy these setups.
>
>     Wanpeng

True, it benefits our real scenario.

This patch lowers compute latency for our workload in a multi-core VM whose vCPUs are pinned 1:1 to pCPUs, running a workload with lots of computation and networking packets.

-Li
Li,Rongqing July 22, 2021, 12:16 p.m. UTC | #5
> > > SMT siblings share caches and other hardware resources; halt polling will
> > > degrade a sibling's performance if that sibling is busy
> >
> > Do you see any real-scenario benefit? Given the polling nature, some
> > cloud providers configure halt polling for their preferred balance of CPU
> > usage and performance, and other cloud providers, whose NFV scenarios are
> > more sensitive to latency, pin vCPUs and pCPUs 1:1; you would destroy these
> > setups.
> >
> >     Wanpeng
> 


Running a single-threaded copy of UnixBench with and without a busy polling program on its SMT sibling, the UnixBench score drops by about one third when the sibling runs the busy polling program (a sketch of such a busy load appears after this message).

Does this case demonstrate the issue?

-Li 


> True, it benefits our real scenario.
> 
> This patch lowers compute latency for our workload in a multi-core VM whose
> vCPUs are pinned 1:1 to pCPUs, running a workload with lots of computation
> and networking packets.
> 
> -Li
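To make the benchmark setup concrete, the busy load on the SMT sibling could be as simple as a pinned spin loop; the program below is a hypothetical stand-in written for illustration (the CPU number and pinning choice are assumptions, not details from the thread):

/* busy.c: pin to the given CPU (the SMT sibling of the UnixBench CPU) and spin. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	cpu_set_t set;
	int cpu = argc > 1 ? atoi(argv[1]) : 1;	/* assumed sibling CPU id */

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set))
		return 1;

	for (;;)
		;	/* spin, emulating a busy SMT sibling */
}

The sibling pairs can be read from /sys/devices/system/cpu/cpuN/topology/thread_siblings_list when choosing which CPU to pin.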
kernel test robot July 22, 2021, 1:35 p.m. UTC | #6
Hi Li,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on tip/sched/core]
[also build test ERROR on vhost/linux-next v5.14-rc2 next-20210722]
[cannot apply to kvm/queue]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Li-RongQing/KVM-Consider-SMT-idle-status-when-halt-polling/20210722-120700
base:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git 031e3bd8986fffe31e1ddbf5264cccfe30c9abd7
config: s390-buildonly-randconfig-r003-20210722 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project 9625ca5b602616b2f5584e8a49ba93c52c141e40)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install s390 cross compiling tool for clang build
        # apt-get install binutils-s390x-linux-gnu
        # https://github.com/0day-ci/linux/commit/df205655d183524ffe8547bcc0f230a6f3217566
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Li-RongQing/KVM-Consider-SMT-idle-status-when-halt-polling/20210722-120700
        git checkout df205655d183524ffe8547bcc0f230a6f3217566
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=s390 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>, old ones prefixed by <<):

>> ERROR: modpost: "idle_cpu" [arch/s390/kvm/kvm.ko] undefined!

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
kernel test robot July 22, 2021, 2:52 p.m. UTC | #7
Hi Li,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on tip/sched/core]
[also build test ERROR on vhost/linux-next v5.14-rc2 next-20210722]
[cannot apply to kvm/queue]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Li-RongQing/KVM-Consider-SMT-idle-status-when-halt-polling/20210722-120700
base:   https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git 031e3bd8986fffe31e1ddbf5264cccfe30c9abd7
config: x86_64-kexec (attached as .config)
compiler: gcc-9 (Debian 9.3.0-22) 9.3.0
reproduce (this is a W=1 build):
        # https://github.com/0day-ci/linux/commit/df205655d183524ffe8547bcc0f230a6f3217566
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Li-RongQing/KVM-Consider-SMT-idle-status-when-halt-polling/20210722-120700
        git checkout df205655d183524ffe8547bcc0f230a6f3217566
        # save the attached .config to linux build tree
        make W=1 ARCH=x86_64 

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>, old ones prefixed by <<):

>> ERROR: modpost: "idle_cpu" [arch/x86/kvm/kvm.ko] undefined!

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
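Both robot reports have the same root cause: is_core_idle() is a static inline that calls idle_cpu(), and the new caller kvm_vcpu_can_poll() ends up built into kvm.ko, but idle_cpu() is not exported to modules. A possible fix, assuming simply exporting the symbol is acceptable, would be a one-line addition next to the definition in kernel/sched/core.c (a sketch, not part of the posted patch):

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ ... @@ int idle_cpu(int cpu)
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(idle_cpu);

Alternatively, the SMT check could live behind an out-of-line exported wrapper so idle_cpu() itself stays private to the scheduler.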
Dongli Zhang July 22, 2021, 3:07 p.m. UTC | #8
Hi RongQing,

Would you mind sharing any performance data that demonstrates how much
performance can be improved?

Thank you very much!

Dongli Zhang

On 7/21/21 8:58 PM, Li RongQing wrote:
> SMT siblings share caches and other hardware resources; halt polling
> will degrade a sibling's performance if that sibling is busy
> 
> Signed-off-by: Li RongQing <lirongqing@baidu.com>
> ---
>  include/linux/kvm_host.h |  5 ++++-
>  include/linux/sched.h    | 17 +++++++++++++++++
>  kernel/sched/fair.c      | 17 -----------------
>  3 files changed, 21 insertions(+), 18 deletions(-)
> 
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index ae7735b..15b3ef4 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -269,7 +269,10 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
>  
>  static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
>  {
> -	return single_task_running() && !need_resched() && ktime_before(cur, stop);
> +	return single_task_running() &&
> +		   !need_resched() &&
> +		   ktime_before(cur, stop) &&
> +		   is_core_idle(raw_smp_processor_id());
>  }
>  
>  /*
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index ec8d07d..c333218 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -34,6 +34,7 @@
>  #include <linux/rseq.h>
>  #include <linux/seqlock.h>
>  #include <linux/kcsan.h>
> +#include <linux/topology.h>
>  #include <asm/kmap_size.h>
>  
>  /* task_struct member predeclarations (sorted alphabetically): */
> @@ -2191,6 +2192,22 @@ int sched_trace_rq_nr_running(struct rq *rq);
>  
>  const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
>  
> +static inline bool is_core_idle(int cpu)
> +{
> +#ifdef CONFIG_SCHED_SMT
> +	int sibling;
> +
> +	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
> +		if (cpu == sibling)
> +			continue;
> +
> +		if (!idle_cpu(cpu))
> +			return false;
> +	}
> +#endif
> +	return true;
> +}
> +
>  #ifdef CONFIG_SCHED_CORE
>  extern void sched_core_free(struct task_struct *tsk);
>  extern void sched_core_fork(struct task_struct *p);
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 44c4520..5b0259c 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1477,23 +1477,6 @@ struct numa_stats {
>  	int idle_cpu;
>  };
>  
> -static inline bool is_core_idle(int cpu)
> -{
> -#ifdef CONFIG_SCHED_SMT
> -	int sibling;
> -
> -	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
> -		if (cpu == sibling)
> -			continue;
> -
> -		if (!idle_cpu(cpu))
> -			return false;
> -	}
> -#endif
> -
> -	return true;
> -}
> -
>  struct task_numa_env {
>  	struct task_struct *p;
>  
>
Dongli Zhang July 22, 2021, 3:20 p.m. UTC | #9
Please ignore my question; I saw the data in the discussion.

"Run a copy (single thread) Unixbench, with or without a busy poll program in
its SMT sibling,  and Unixbench score can lower 1/3 with SMT busy polling program"

Dongli Zhang

On 7/22/21 8:07 AM, Dongli Zhang wrote:
> Hi RongQing,
> 
> Would you mind sharing any performance data that demonstrates how much
> performance can be improved?
> 
> Thank you very much!
> 
> Dongli Zhang
> 
> On 7/21/21 8:58 PM, Li RongQing wrote:
>> SMT siblings share caches and other hardware resources; halt polling
>> will degrade a sibling's performance if that sibling is busy
>>
>> Signed-off-by: Li RongQing <lirongqing@baidu.com>
>> ---
>>  include/linux/kvm_host.h |  5 ++++-
>>  include/linux/sched.h    | 17 +++++++++++++++++
>>  kernel/sched/fair.c      | 17 -----------------
>>  3 files changed, 21 insertions(+), 18 deletions(-)
>>
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index ae7735b..15b3ef4 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -269,7 +269,10 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
>>  
>>  static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
>>  {
>> -	return single_task_running() && !need_resched() && ktime_before(cur, stop);
>> +	return single_task_running() &&
>> +		   !need_resched() &&
>> +		   ktime_before(cur, stop) &&
>> +		   is_core_idle(raw_smp_processor_id());
>>  }
>>  
>>  /*
>> diff --git a/include/linux/sched.h b/include/linux/sched.h
>> index ec8d07d..c333218 100644
>> --- a/include/linux/sched.h
>> +++ b/include/linux/sched.h
>> @@ -34,6 +34,7 @@
>>  #include <linux/rseq.h>
>>  #include <linux/seqlock.h>
>>  #include <linux/kcsan.h>
>> +#include <linux/topology.h>
>>  #include <asm/kmap_size.h>
>>  
>>  /* task_struct member predeclarations (sorted alphabetically): */
>> @@ -2191,6 +2192,22 @@ int sched_trace_rq_nr_running(struct rq *rq);
>>  
>>  const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
>>  
>> +static inline bool is_core_idle(int cpu)
>> +{
>> +#ifdef CONFIG_SCHED_SMT
>> +	int sibling;
>> +
>> +	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
>> +		if (cpu == sibling)
>> +			continue;
>> +
>> +		if (!idle_cpu(cpu))
>> +			return false;
>> +	}
>> +#endif
>> +	return true;
>> +}
>> +
>>  #ifdef CONFIG_SCHED_CORE
>>  extern void sched_core_free(struct task_struct *tsk);
>>  extern void sched_core_fork(struct task_struct *p);
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 44c4520..5b0259c 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -1477,23 +1477,6 @@ struct numa_stats {
>>  	int idle_cpu;
>>  };
>>  
>> -static inline bool is_core_idle(int cpu)
>> -{
>> -#ifdef CONFIG_SCHED_SMT
>> -	int sibling;
>> -
>> -	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
>> -		if (cpu == sibling)
>> -			continue;
>> -
>> -		if (!idle_cpu(cpu))
>> -			return false;
>> -	}
>> -#endif
>> -
>> -	return true;
>> -}
>> -
>>  struct task_numa_env {
>>  	struct task_struct *p;
>>  
>>
Sean Christopherson July 27, 2021, 1:25 a.m. UTC | #10
On Thu, Jul 22, 2021, Li,Rongqing wrote:
> > > > SMT siblings share caches and other hardware resources; halt polling will
> > > > degrade a sibling's performance if that sibling is busy
> > >
> > > Do you see any real-scenario benefit? Given the polling nature, some
> > > cloud providers configure halt polling for their preferred balance of CPU
> > > usage and performance, and other cloud providers, whose NFV scenarios are
> > > more sensitive to latency, pin vCPUs and pCPUs 1:1; you would destroy
> > > these setups.
> > >
> > >     Wanpeng
> > 
> 
> 
> Running a single-threaded copy of UnixBench with and without a busy polling
> program on its SMT sibling, the UnixBench score drops by about one third when
> the sibling runs the busy polling program.

Rather than disallowing halt-polling entirely, on x86 it should be sufficient to
simply have the hardware thread yield to its sibling(s) via PAUSE.  It probably
won't get back all performance, but I would expect it to be close. 

This compiles on all KVM architectures, and AFAICT the intended usage of
cpu_relax() is identical for all architectures.

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 6980dabe9df5..a07ecb3c67fb 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3111,6 +3111,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
                                goto out;
                        }
                        poll_end = cur = ktime_get();
+                       cpu_relax();
                } while (kvm_vcpu_can_poll(cur, stop));
        }
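For context, cpu_relax() on x86 compiles down to the PAUSE instruction (encoded as REP; NOP), which de-prioritizes the spinning hardware thread and lets its sibling use the shared execution resources. Roughly, paraphrasing the x86 definition (a sketch, not part of the posted diff):

/* PAUSE is a good thing to insert into busy-wait loops. */
static __always_inline void cpu_relax(void)
{
	asm volatile("rep; nop" ::: "memory");
}

That is why yielding via PAUSE is expected to recover most of the sibling's lost performance without giving up halt polling entirely.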
Li,Rongqing July 27, 2021, 6:39 a.m. UTC | #11
> -----Original Message-----
> From: Sean Christopherson <seanjc@google.com>
> Sent: July 27, 2021 9:26
> To: Li,Rongqing <lirongqing@baidu.com>
> Cc: Wanpeng Li <kernellwp@gmail.com>; Paolo Bonzini
> <pbonzini@redhat.com>; Ingo Molnar <mingo@redhat.com>; Peter Zijlstra
> <peterz@infradead.org>; kvm <kvm@vger.kernel.org>; LKML
> <linux-kernel@vger.kernel.org>
> Subject: Re: Reply: [PATCH] KVM: Consider SMT idle status when halt polling
> 
> Rather than disallowing halt-polling entirely, on x86 it should be sufficient to
> simply have the hardware thread yield to its sibling(s) via PAUSE.  It probably
> won't get back all performance, but I would expect it to be close.
> 
> This compiles on all KVM architectures, and AFAICT the intended usage of
> cpu_relax() is identical for all architectures.
> 

Reasonable, thanks; I will resend it.

-Li


> diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index
> 6980dabe9df5..a07ecb3c67fb 100644
> --- a/virt/kvm/kvm_main.c
> +++ b/virt/kvm/kvm_main.c
> @@ -3111,6 +3111,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
>                                 goto out;
>                         }
>                         poll_end = cur = ktime_get();
> +                       cpu_relax();
>                 } while (kvm_vcpu_can_poll(cur, stop));
>         }
>

Patch

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ae7735b..15b3ef4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -269,7 +269,10 @@  static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
 
 static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
 {
-	return single_task_running() && !need_resched() && ktime_before(cur, stop);
+	return single_task_running() &&
+		   !need_resched() &&
+		   ktime_before(cur, stop) &&
+		   is_core_idle(raw_smp_processor_id());
 }
 
 /*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ec8d07d..c333218 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,7 @@ 
 #include <linux/rseq.h>
 #include <linux/seqlock.h>
 #include <linux/kcsan.h>
+#include <linux/topology.h>
 #include <asm/kmap_size.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
@@ -2191,6 +2192,22 @@  int sched_trace_rq_nr_running(struct rq *rq);
 
 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
 
+static inline bool is_core_idle(int cpu)
+{
+#ifdef CONFIG_SCHED_SMT
+	int sibling;
+
+	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
+		if (cpu == sibling)
+			continue;
+
+		if (!idle_cpu(cpu))
+			return false;
+	}
+#endif
+	return true;
+}
+
 #ifdef CONFIG_SCHED_CORE
 extern void sched_core_free(struct task_struct *tsk);
 extern void sched_core_fork(struct task_struct *p);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 44c4520..5b0259c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1477,23 +1477,6 @@  struct numa_stats {
 	int idle_cpu;
 };
 
-static inline bool is_core_idle(int cpu)
-{
-#ifdef CONFIG_SCHED_SMT
-	int sibling;
-
-	for_each_cpu(sibling, cpu_smt_mask(cpu)) {
-		if (cpu == sibling)
-			continue;
-
-		if (!idle_cpu(cpu))
-			return false;
-	}
-#endif
-
-	return true;
-}
-
 struct task_numa_env {
 	struct task_struct *p;