
[PATCHv2,3/4] arm64: do not use dummy vcpu_is_preempted()

Message ID 20210709043713.887098-4-senozhatsky@chromium.org (mailing list archive)
State New, archived
Series arm64:kvm: teach guest sched that VCPUs can be preempted

Commit Message

Sergey Senozhatsky July 9, 2021, 4:37 a.m. UTC
vcpu_is_preempted() can now report the actual state of
the VCPU, so the scheduler can make better decisions when
picking an idle CPU to enqueue a task on.

Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
---
 arch/arm64/include/asm/spinlock.h | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)
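
For context, one scheduler path that consults this helper is
available_idle_cpu() in kernel/sched/core.c, which roughly reads:

int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	/* An idle CPU is of no use if its VCPU is not actually running. */
	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

With the dummy vcpu_is_preempted() the second check never fires, so the
guest scheduler may pick a VCPU whose physical CPU is currently running
something else.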

Comments

Marc Zyngier July 12, 2021, 3:47 p.m. UTC | #1
On Fri, 09 Jul 2021 05:37:12 +0100,
Sergey Senozhatsky <senozhatsky@chromium.org> wrote:
> 
> vcpu_is_preempted() can now report the actual state of
> the VCPU, so the scheduler can make better decisions when
> picking an idle CPU to enqueue a task on.
> 
> Signed-off-by: Sergey Senozhatsky <senozhatsky@chromium.org>
> ---
>  arch/arm64/include/asm/spinlock.h | 18 ++++++++++--------
>  1 file changed, 10 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
> index 0525c0b089ed..1d579497e1b8 100644
> --- a/arch/arm64/include/asm/spinlock.h
> +++ b/arch/arm64/include/asm/spinlock.h
> @@ -7,21 +7,23 @@
>  
>  #include <asm/qspinlock.h>
>  #include <asm/qrwlock.h>
> +#include <asm/paravirt.h>
>  
>  /* See include/linux/spinlock.h */
>  #define smp_mb__after_spinlock()	smp_mb()
>  
> -/*
> - * Changing this will break osq_lock() thanks to the call inside
> - * smp_cond_load_relaxed().
> - *
> - * See:
> - * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
> - */

Why are you deleting this? Please explain your reasoning in the commit
message. It seems to me that it still makes complete sense when
CONFIG_PARAVIRT is not defined.
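
For reference, the osq_lock() call that comment refers to is the
condition inside smp_cond_load_relaxed() in kernel/locking/osq_lock.c,
roughly:

	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
				  vcpu_is_preempted(node_cpu(node->prev))))
		return true;

On arm64 that condition is re-evaluated from inside an event-wait loop,
which is why the comment warns against changing vcpu_is_preempted()
carelessly; see the lore link above for the details.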

>  #define vcpu_is_preempted vcpu_is_preempted
> -static inline bool vcpu_is_preempted(int cpu)
> +
> +#ifdef CONFIG_PARAVIRT
> +static inline bool vcpu_is_preempted(unsigned int cpu)
> +{
> +	return paravirt_vcpu_is_preempted(cpu);
> +}
> +#else
> +static inline bool vcpu_is_preempted(unsigned int cpu)
>  {
>  	return false;
>  }
> +#endif /* CONFIG_PARAVIRT */
>  
>  #endif /* __ASM_SPINLOCK_H */

Thanks,

	M.
Sergey Senozhatsky July 21, 2021, 2:06 a.m. UTC | #2
On (21/07/12 16:47), Marc Zyngier wrote:
> >  #include <asm/qspinlock.h>
> >  #include <asm/qrwlock.h>
> > +#include <asm/paravirt.h>
> >  
> >  /* See include/linux/spinlock.h */
> >  #define smp_mb__after_spinlock()	smp_mb()
> >  
> > -/*
> > - * Changing this will break osq_lock() thanks to the call inside
> > - * smp_cond_load_relaxed().
> > - *
> > - * See:
> > - * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
> > - */
> 
> Why are you deleting this? Please explain your reasoning in the commit
> message. It seems to me that it still makes complete sense when
> CONFIG_PARAVIRT is not defined.

You are right. I'll move it to the !PARAVIRT #else branch.
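
For illustration, keeping the comment on the !PARAVIRT branch would make
the result look roughly like this (a sketch of the planned change, not
the posted follow-up):

#ifdef CONFIG_PARAVIRT
static inline bool vcpu_is_preempted(unsigned int cpu)
{
	return paravirt_vcpu_is_preempted(cpu);
}
#else
/*
 * Dummy version: changing this will break osq_lock() thanks to the call
 * inside smp_cond_load_relaxed().
 *
 * See:
 * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
 */
static inline bool vcpu_is_preempted(unsigned int cpu)
{
	return false;
}
#endif /* CONFIG_PARAVIRT */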

Patch

diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 0525c0b089ed..1d579497e1b8 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -7,21 +7,23 @@ 
 
 #include <asm/qspinlock.h>
 #include <asm/qrwlock.h>
+#include <asm/paravirt.h>
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
 
-/*
- * Changing this will break osq_lock() thanks to the call inside
- * smp_cond_load_relaxed().
- *
- * See:
- * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
- */
 #define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(int cpu)
+
+#ifdef CONFIG_PARAVIRT
+static inline bool vcpu_is_preempted(unsigned int cpu)
+{
+	return paravirt_vcpu_is_preempted(cpu);
+}
+#else
+static inline bool vcpu_is_preempted(unsigned int cpu)
 {
 	return false;
 }
+#endif /* CONFIG_PARAVIRT */
 
 #endif /* __ASM_SPINLOCK_H */
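
For reference, the #define vcpu_is_preempted vcpu_is_preempted line is
what suppresses the generic fallback in include/linux/sched.h:

#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
#endif

Defining the macro tells generic code that the architecture supplies its
own implementation, so the fallback is compiled out.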