diff mbox series

[3/8] xen/sched: don't use irqsave locks in dumping functions

Message ID 20200213125449.14226-4-jgross@suse.com (mailing list archive)
State New, archived
Headers show
Series xen: don't let keyhandlers block indefinitely on locks | expand

Commit Message

Jürgen Groß Feb. 13, 2020, 12:54 p.m. UTC
All dumping functions invoked by the "runq" keyhandler are called with
disabled interrupts, so there is no need to use the irqsave variants
of any locks in those functions.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched/credit.c  | 10 ++++------
 xen/common/sched/credit2.c |  5 ++---
 xen/common/sched/null.c    | 10 ++++------
 xen/common/sched/rt.c      | 10 ++++------
 4 files changed, 14 insertions(+), 21 deletions(-)

Comments

Dario Faggioli Feb. 19, 2020, 12:40 p.m. UTC | #1
On Thu, 2020-02-13 at 13:54 +0100, Juergen Gross wrote:
> All dumping functions invoked by the "runq" keyhandler are called
> with
> disabled interrupts, so there is no need to use the irqsave variants
> of any locks in those functions.
> 
> 
To me, this patch looks pretty independent of the series. I.e.,
whatever it is that we will do with locking for keyhandlers, it's ok
(actually, it's better!) to just use spin_lock() in scheduler specific
code.

So:

> Signed-off-by: Juergen Gross <jgross@suse.com>
>
Reviewed-by: Dario Faggioli <dfaggioli@suse.com>

Regards
Jan Beulich Feb. 19, 2020, 2:27 p.m. UTC | #2
On 13.02.2020 13:54, Juergen Gross wrote:
> All dumping functions invoked by the "runq" keyhandler are called with
> disabled interrupts,

Is this actually needed for anything? It means not servicing
interrupts for perhaps an extended period of time. Debug keys
aren't promised to be non-intrusive, but they also shouldn't
be more intrusive than really needed. Wouldn't it therefore
be better to keep locking as it is now, and instead make sure
interrupts get turned off elsewhere (if needed) for much
shorter periods of time?

Jan

> so there is no need to use the irqsave variants
> of any locks in those functions.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
>  xen/common/sched/credit.c  | 10 ++++------
>  xen/common/sched/credit2.c |  5 ++---
>  xen/common/sched/null.c    | 10 ++++------
>  xen/common/sched/rt.c      | 10 ++++------
>  4 files changed, 14 insertions(+), 21 deletions(-)
> 
> diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
> index 05946eea6e..dee87e7fe2 100644
> --- a/xen/common/sched/credit.c
> +++ b/xen/common/sched/credit.c
> @@ -2048,7 +2048,6 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
>      const struct csched_pcpu *spc;
>      const struct csched_unit *svc;
>      spinlock_t *lock;
> -    unsigned long flags;
>      int loop;
>  
>      /*
> @@ -2058,7 +2057,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
>       * - we scan through the runqueue, so we need the proper runqueue
>       *   lock (the one of the runqueue of this cpu).
>       */
> -    spin_lock_irqsave(&prv->lock, flags);
> +    spin_lock(&prv->lock);
>      lock = pcpu_schedule_lock(cpu);
>  
>      spc = CSCHED_PCPU(cpu);
> @@ -2089,7 +2088,7 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
>      }
>  
>      pcpu_schedule_unlock(lock, cpu);
> -    spin_unlock_irqrestore(&prv->lock, flags);
> +    spin_unlock(&prv->lock);
>  }
>  
>  static void
> @@ -2098,9 +2097,8 @@ csched_dump(const struct scheduler *ops)
>      struct list_head *iter_sdom, *iter_svc;
>      struct csched_private *prv = CSCHED_PRIV(ops);
>      int loop;
> -    unsigned long flags;
>  
> -    spin_lock_irqsave(&prv->lock, flags);
> +    spin_lock(&prv->lock);
>  
>      printk("info:\n"
>             "\tncpus              = %u\n"
> @@ -2153,7 +2151,7 @@ csched_dump(const struct scheduler *ops)
>          }
>      }
>  
> -    spin_unlock_irqrestore(&prv->lock, flags);
> +    spin_unlock(&prv->lock);
>  }
>  
>  static int __init
> diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
> index f2752f27e2..e76d2ed543 100644
> --- a/xen/common/sched/credit2.c
> +++ b/xen/common/sched/credit2.c
> @@ -3649,14 +3649,13 @@ csched2_dump(const struct scheduler *ops)
>  {
>      struct list_head *iter_sdom;
>      struct csched2_private *prv = csched2_priv(ops);
> -    unsigned long flags;
>      unsigned int i, j, loop;
>  
>      /*
>       * We need the private scheduler lock as we access global
>       * scheduler data and (below) the list of active domains.
>       */
> -    read_lock_irqsave(&prv->lock, flags);
> +    read_lock(&prv->lock);
>  
>      printk("Active queues: %d\n"
>             "\tdefault-weight     = %d\n",
> @@ -3749,7 +3748,7 @@ csched2_dump(const struct scheduler *ops)
>          spin_unlock(&rqd->lock);
>      }
>  
> -    read_unlock_irqrestore(&prv->lock, flags);
> +    read_unlock(&prv->lock);
>  }
>  
>  static void *
> diff --git a/xen/common/sched/null.c b/xen/common/sched/null.c
> index 8c3101649d..3b31703d7e 100644
> --- a/xen/common/sched/null.c
> +++ b/xen/common/sched/null.c
> @@ -954,9 +954,8 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
>      const struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
>      const struct null_unit *nvc;
>      spinlock_t *lock;
> -    unsigned long flags;
>  
> -    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
> +    lock = pcpu_schedule_lock(cpu);
>  
>      printk("CPU[%02d] sibling={%*pbl}, core={%*pbl}",
>             cpu, CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
> @@ -974,17 +973,16 @@ static void null_dump_pcpu(const struct scheduler *ops, int cpu)
>          printk("\n");
>      }
>  
> -    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
> +    pcpu_schedule_unlock(lock, cpu);
>  }
>  
>  static void null_dump(const struct scheduler *ops)
>  {
>      struct null_private *prv = null_priv(ops);
>      struct list_head *iter;
> -    unsigned long flags;
>      unsigned int loop;
>  
> -    spin_lock_irqsave(&prv->lock, flags);
> +    spin_lock(&prv->lock);
>  
>      printk("\tcpus_free = %*pbl\n", CPUMASK_PR(&prv->cpus_free));
>  
> @@ -1029,7 +1027,7 @@ static void null_dump(const struct scheduler *ops)
>      printk("\n");
>      spin_unlock(&prv->waitq_lock);
>  
> -    spin_unlock_irqrestore(&prv->lock, flags);
> +    spin_unlock(&prv->lock);
>  }
>  
>  static const struct scheduler sched_null_def = {
> diff --git a/xen/common/sched/rt.c b/xen/common/sched/rt.c
> index 66585ed50a..16379cb2d2 100644
> --- a/xen/common/sched/rt.c
> +++ b/xen/common/sched/rt.c
> @@ -353,9 +353,8 @@ rt_dump_pcpu(const struct scheduler *ops, int cpu)
>  {
>      struct rt_private *prv = rt_priv(ops);
>      const struct rt_unit *svc;
> -    unsigned long flags;
>  
> -    spin_lock_irqsave(&prv->lock, flags);
> +    spin_lock(&prv->lock);
>      printk("CPU[%02d]\n", cpu);
>      /* current UNIT (nothing to say if that's the idle unit). */
>      svc = rt_unit(curr_on_cpu(cpu));
> @@ -363,7 +362,7 @@ rt_dump_pcpu(const struct scheduler *ops, int cpu)
>      {
>          rt_dump_unit(ops, svc);
>      }
> -    spin_unlock_irqrestore(&prv->lock, flags);
> +    spin_unlock(&prv->lock);
>  }
>  
>  static void
> @@ -373,9 +372,8 @@ rt_dump(const struct scheduler *ops)
>      struct rt_private *prv = rt_priv(ops);
>      const struct rt_unit *svc;
>      const struct rt_dom *sdom;
> -    unsigned long flags;
>  
> -    spin_lock_irqsave(&prv->lock, flags);
> +    spin_lock(&prv->lock);
>  
>      if ( list_empty(&prv->sdom) )
>          goto out;
> @@ -421,7 +419,7 @@ rt_dump(const struct scheduler *ops)
>      }
>  
>   out:
> -    spin_unlock_irqrestore(&prv->lock, flags);
> +    spin_unlock(&prv->lock);
>  }
>  
>  /*
>
Jürgen Groß Feb. 19, 2020, 3:02 p.m. UTC | #3
On 19.02.20 15:27, Jan Beulich wrote:
> On 13.02.2020 13:54, Juergen Gross wrote:
>> All dumping functions invoked by the "runq" keyhandler are called with
>> disabled interrupts,
> 
> Is this actually needed for anything? It means not servicing
> interrupts for perhaps an extended period of time. Debug keys
> aren't promised to be non-intrusive, but they also shouldn't
> be more intrusive than really needed. Wouldn't it therefore
> be better to keep locking as it is now, and instead make sure
> interrupts get turned off elsewhere (if needed) for much
> shorter periods of time?

Indeed this is the better option. I just checked the code and
think blindly turning interrupts off is not needed.

I'll rework the patch and send it out separately.


Juergen
Dario Faggioli Feb. 19, 2020, 3:47 p.m. UTC | #4
On Wed, 2020-02-19 at 16:02 +0100, Jürgen Groß wrote:
> On 19.02.20 15:27, Jan Beulich wrote:
> > On 13.02.2020 13:54, Juergen Gross wrote:
> > > All dumping functions invoked by the "runq" keyhandler are called
> > > with
> > > disabled interrupts,
> > 
> > Is this actually needed for anything? It means not servicing
> > interrupts for perhaps an extended period of time. Debug keys
> > aren't promised to be non-intrusive, but they also shouldn't
> > be more intrusive than really needed. Wouldn't it therefore
> > be better to keep locking as it is now, and instead make sure
> > interrupts get turned off elsewhere (if needed) for much
> > shorter periods of time?
> 
> Indeed this is the better option. I just checked the code and
> think blindly turning interrupts off is not needed.
> 
Well, yes... Assuming you are referring to the IRQ being disabled in
cpupool.c:dump_runq(), my impression is that we can get rid of that,
and leave the sched-specific code (more or less) as it is (for the sake
of runqueue lock irq-safety).

Regards
diff mbox series

Patch

diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index 05946eea6e..dee87e7fe2 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -2048,7 +2048,6 @@  csched_dump_pcpu(const struct scheduler *ops, int cpu)
     const struct csched_pcpu *spc;
     const struct csched_unit *svc;
     spinlock_t *lock;
-    unsigned long flags;
     int loop;
 
     /*
@@ -2058,7 +2057,7 @@  csched_dump_pcpu(const struct scheduler *ops, int cpu)
      * - we scan through the runqueue, so we need the proper runqueue
      *   lock (the one of the runqueue of this cpu).
      */
-    spin_lock_irqsave(&prv->lock, flags);
+    spin_lock(&prv->lock);
     lock = pcpu_schedule_lock(cpu);
 
     spc = CSCHED_PCPU(cpu);
@@ -2089,7 +2088,7 @@  csched_dump_pcpu(const struct scheduler *ops, int cpu)
     }
 
     pcpu_schedule_unlock(lock, cpu);
-    spin_unlock_irqrestore(&prv->lock, flags);
+    spin_unlock(&prv->lock);
 }
 
 static void
@@ -2098,9 +2097,8 @@  csched_dump(const struct scheduler *ops)
     struct list_head *iter_sdom, *iter_svc;
     struct csched_private *prv = CSCHED_PRIV(ops);
     int loop;
-    unsigned long flags;
 
-    spin_lock_irqsave(&prv->lock, flags);
+    spin_lock(&prv->lock);
 
     printk("info:\n"
            "\tncpus              = %u\n"
@@ -2153,7 +2151,7 @@  csched_dump(const struct scheduler *ops)
         }
     }
 
-    spin_unlock_irqrestore(&prv->lock, flags);
+    spin_unlock(&prv->lock);
 }
 
 static int __init
diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
index f2752f27e2..e76d2ed543 100644
--- a/xen/common/sched/credit2.c
+++ b/xen/common/sched/credit2.c
@@ -3649,14 +3649,13 @@  csched2_dump(const struct scheduler *ops)
 {
     struct list_head *iter_sdom;
     struct csched2_private *prv = csched2_priv(ops);
-    unsigned long flags;
     unsigned int i, j, loop;
 
     /*
      * We need the private scheduler lock as we access global
      * scheduler data and (below) the list of active domains.
      */
-    read_lock_irqsave(&prv->lock, flags);
+    read_lock(&prv->lock);
 
     printk("Active queues: %d\n"
            "\tdefault-weight     = %d\n",
@@ -3749,7 +3748,7 @@  csched2_dump(const struct scheduler *ops)
         spin_unlock(&rqd->lock);
     }
 
-    read_unlock_irqrestore(&prv->lock, flags);
+    read_unlock(&prv->lock);
 }
 
 static void *
diff --git a/xen/common/sched/null.c b/xen/common/sched/null.c
index 8c3101649d..3b31703d7e 100644
--- a/xen/common/sched/null.c
+++ b/xen/common/sched/null.c
@@ -954,9 +954,8 @@  static void null_dump_pcpu(const struct scheduler *ops, int cpu)
     const struct null_pcpu *npc = get_sched_res(cpu)->sched_priv;
     const struct null_unit *nvc;
     spinlock_t *lock;
-    unsigned long flags;
 
-    lock = pcpu_schedule_lock_irqsave(cpu, &flags);
+    lock = pcpu_schedule_lock(cpu);
 
     printk("CPU[%02d] sibling={%*pbl}, core={%*pbl}",
            cpu, CPUMASK_PR(per_cpu(cpu_sibling_mask, cpu)),
@@ -974,17 +973,16 @@  static void null_dump_pcpu(const struct scheduler *ops, int cpu)
         printk("\n");
     }
 
-    pcpu_schedule_unlock_irqrestore(lock, flags, cpu);
+    pcpu_schedule_unlock(lock, cpu);
 }
 
 static void null_dump(const struct scheduler *ops)
 {
     struct null_private *prv = null_priv(ops);
     struct list_head *iter;
-    unsigned long flags;
     unsigned int loop;
 
-    spin_lock_irqsave(&prv->lock, flags);
+    spin_lock(&prv->lock);
 
     printk("\tcpus_free = %*pbl\n", CPUMASK_PR(&prv->cpus_free));
 
@@ -1029,7 +1027,7 @@  static void null_dump(const struct scheduler *ops)
     printk("\n");
     spin_unlock(&prv->waitq_lock);
 
-    spin_unlock_irqrestore(&prv->lock, flags);
+    spin_unlock(&prv->lock);
 }
 
 static const struct scheduler sched_null_def = {
diff --git a/xen/common/sched/rt.c b/xen/common/sched/rt.c
index 66585ed50a..16379cb2d2 100644
--- a/xen/common/sched/rt.c
+++ b/xen/common/sched/rt.c
@@ -353,9 +353,8 @@  rt_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
     const struct rt_unit *svc;
-    unsigned long flags;
 
-    spin_lock_irqsave(&prv->lock, flags);
+    spin_lock(&prv->lock);
     printk("CPU[%02d]\n", cpu);
     /* current UNIT (nothing to say if that's the idle unit). */
     svc = rt_unit(curr_on_cpu(cpu));
@@ -363,7 +362,7 @@  rt_dump_pcpu(const struct scheduler *ops, int cpu)
     {
         rt_dump_unit(ops, svc);
     }
-    spin_unlock_irqrestore(&prv->lock, flags);
+    spin_unlock(&prv->lock);
 }
 
 static void
@@ -373,9 +372,8 @@  rt_dump(const struct scheduler *ops)
     struct rt_private *prv = rt_priv(ops);
     const struct rt_unit *svc;
     const struct rt_dom *sdom;
-    unsigned long flags;
 
-    spin_lock_irqsave(&prv->lock, flags);
+    spin_lock(&prv->lock);
 
     if ( list_empty(&prv->sdom) )
         goto out;
@@ -421,7 +419,7 @@  rt_dump(const struct scheduler *ops)
     }
 
  out:
-    spin_unlock_irqrestore(&prv->lock, flags);
+    spin_unlock(&prv->lock);
 }
 
 /*