diff mbox series

[PATCH-for-4.17] xen/sched: migrate timers to correct cpus after suspend

Message ID 20221021145357.17931-1-jgross@suse.com (mailing list archive)
State Superseded
Headers show
Series [PATCH-for-4.17] xen/sched: migrate timers to correct cpus after suspend | expand

Commit Message

Jürgen Groß Oct. 21, 2022, 2:53 p.m. UTC
Today all timers are migrated to cpu 0 when the system is being
suspended. They are not migrated back after resuming the system again.

This results (at least) in problems with the credit scheduler, as the
timer isn't handled on the cpu it was expected to occur on.

Add migrating the scheduling related timers of a specific cpu from cpu
0 back to its original cpu when that cpu has gone up when resuming the
system.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
This is an alternative approach to this one:
https://lists.xen.org/archives/html/xen-devel/2022-09/msg00510.html
---
 xen/common/sched/core.c    | 23 +++++++++++++++
 xen/common/sched/cpupool.c |  2 ++
 xen/common/sched/credit.c  | 13 +++++++++
 xen/common/sched/private.h | 10 +++++++
 xen/common/sched/rt.c      | 58 ++++++++++++++++++++++++++------------
 5 files changed, 88 insertions(+), 18 deletions(-)

Comments

Marek Marczykowski-Górecki Oct. 27, 2022, 6:13 p.m. UTC | #1
On Fri, Oct 21, 2022 at 04:53:57PM +0200, Juergen Gross wrote:
> Today all timers are migrated to cpu 0 when the system is being
> suspended. They are not migrated back after resuming the system again.
> 
> This results (at least) to problems with the credit scheduler, as the
> timer isn't handled on the cpu it was expected to occur.
> 
> Add migrating the scheduling related timers of a specific cpu from cpu
> 0 back to its original cpu when that cpu has gone up when resuming the
> system.
> 
> Signed-off-by: Juergen Gross <jgross@suse.com>

I tested it in my setup, but it crashed:

(XEN) arch/x86/cpu/mcheck/mce_intel.c:770: MCA Capability: firstbank 0, extended MCE MSR 0, BCAST, CMCI
(XEN) CPU0 CMCI LVT vector (0xf1) already installed
(XEN) Finishing wakeup from ACPI S3 state.
(XEN) Enabling non-boot CPUs  ...
(XEN) Platform timer appears to have unexpectedly wrapped 3 times.
(XEN) ----[ Xen-4.17-rc  x86_64  debug=y  Tainted:   C    ]----
(XEN) CPU:    0
(XEN) RIP:    e008:[<ffff82d040250c7e>] sched_migrate_timers+0x4d/0xc9
(XEN) RFLAGS: 0000000000010202   CONTEXT: hypervisor
(XEN) rax: ffff82d0405c5298   rbx: 0000000000000000   rcx: 0000000000000001
(XEN) rdx: 0000003211219000   rsi: 0000000000000004   rdi: 0000000000000001
(XEN) rbp: ffff830256227d20   rsp: ffff830256227d18   r8:  ffff82d0405d2f78
(XEN) r9:  ffff82d0405ef8a0   r10: 00000000ffffffff   r11: 00000000002191c0
(XEN) r12: 0000000000000000   r13: 0000000000000001   r14: 0000000000000004
(XEN) r15: 0000000000000000   cr0: 000000008005003b   cr4: 00000000003526e0
(XEN) cr3: 0000000049677000   cr2: 0000000000000070
(XEN) fsb: 0000000000000000   gsb: 0000000000000000   gss: 0000000000000000
(XEN) ds: 0000   es: 0000   fs: 0000   gs: 0000   ss: 0000   cs: e008
(XEN) Xen code around <ffff82d040250c7e> (sched_migrate_timers+0x4d/0xc9):
(XEN)  48 8b 14 ca 48 8b 1c 02 <39> 7b 70 74 51 48 8d 05 56 34 37 00 48 89 e2 48
(XEN) Xen stack trace from rsp=ffff830256227d18:
(XEN)    0000000000000001 ffff830256227d58 ffff82d04023f1a0 ffff82d04047a308
(XEN)    ffff82d04047a300 ffff82d04047a060 0000000000000004 0000000000000000
(XEN)    ffff830256227da0 ffff82d040226a04 0000000000000000 0000000000000001
(XEN)    0000000000000001 0000000000000000 0000000000000001 ffff830256227fff
(XEN)    ffff82d04046c520 ffff830256227db8 ffff82d040207e75 0000000000000001
(XEN)    ffff830256227de0 ffff82d040208243 ffff82d04047a220 0000000000000001
(XEN)    0000000000000010 ffff830256227e18 ffff82d040208428 0000000000000200
(XEN)    0000000000000000 0000000000000003 ffff830256227ef8 ffff82d0405de6c0
(XEN)    ffff830256227e48 ffff82d04027a2df ffff830251491490 ffff830251757000
(XEN)    0000000000000000 0000000000000000 ffff830256227e68 ffff82d040209c73
(XEN)    ffff8302517571b8 ffff82d040479618 ffff830256227e88 ffff82d04022e484
(XEN)    ffff82d0405c41a0 ffff82d0405c41b0 ffff830256227eb8 ffff82d04022e76e
(XEN)    0000000000000000 0000000000007fff ffff82d0405caf00 ffff82d0405c41b0
(XEN)    ffff830256227ef0 ffff82d0402f455d ffff82d0402f44e5 ffff830251757000
(XEN)    ffff830256227ef8 ffff8302517f5000 0000000000000000 ffff830256227e18
(XEN)    0000000000000000 ffffc90040b43d60 0000000000003403 0000000000000000
(XEN)    0000000000000003 ffffffff82e37868 0000000000000246 0000000000000003
(XEN)    0000000000003403 0000000000003403 0000000000000000 ffffffff81e4a0ea
(XEN)    0000000000003403 0000000000000010 deadbeefdeadf00d 0000010000000000
(XEN)    ffffffff81e4a0ea 000000000000e033 0000000000000246 ffffc90040b43c30
(XEN) Xen call trace:
(XEN)    [<ffff82d040250c7e>] R sched_migrate_timers+0x4d/0xc9
(XEN)    [<ffff82d04023f1a0>] F cpupool.c#cpu_callback+0x13d/0x47e
(XEN)    [<ffff82d040226a04>] F notifier_call_chain+0x6c/0x96
(XEN)    [<ffff82d040207e75>] F cpu.c#cpu_notifier_call_chain+0x1b/0x36
(XEN)    [<ffff82d040208243>] F cpu_up+0xaf/0xc8
(XEN)    [<ffff82d040208428>] F enable_nonboot_cpus+0x7b/0x1ef
(XEN)    [<ffff82d04027a2df>] F power.c#enter_state_helper+0x156/0x5dc
(XEN)    [<ffff82d040209c73>] F domain.c#continue_hypercall_tasklet_handler+0x50/0xbf
(XEN)    [<ffff82d04022e484>] F tasklet.c#do_tasklet_work+0x7b/0xac
(XEN)    [<ffff82d04022e76e>] F do_tasklet+0x58/0x8a
(XEN)    [<ffff82d0402f455d>] F domain.c#idle_loop+0x78/0xe6
(XEN) 
(XEN) Pagetable walk from 0000000000000070:
(XEN)  L4[0x000] = 00000002517fb063 ffffffffffffffff
(XEN)  L3[0x000] = 00000002517fa063 ffffffffffffffff
(XEN)  L2[0x000] = 00000002517f9063 ffffffffffffffff
(XEN)  L1[0x000] = 0000000000000000 ffffffffffffffff
(XEN) 
(XEN) ****************************************
(XEN) Panic on CPU 0:
(XEN) FATAL PAGE FAULT
(XEN) [error_code=0000]
(XEN) Faulting linear address: 0000000000000070
(XEN) ****************************************
Jürgen Groß Oct. 28, 2022, 10:08 a.m. UTC | #2
On 27.10.22 20:13, Marek Marczykowski-Górecki wrote:
> On Fri, Oct 21, 2022 at 04:53:57PM +0200, Juergen Gross wrote:
>> Today all timers are migrated to cpu 0 when the system is being
>> suspended. They are not migrated back after resuming the system again.
>>
>> This results (at least) to problems with the credit scheduler, as the
>> timer isn't handled on the cpu it was expected to occur.
>>
>> Add migrating the scheduling related timers of a specific cpu from cpu
>> 0 back to its original cpu when that cpu has gone up when resuming the
>> system.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
> 
> I tested it in my setup, but it crashed:
> 
> (XEN) arch/x86/cpu/mcheck/mce_intel.c:770: MCA Capability: firstbank 0, extended MCE MSR 0, BCAST, CMCI
> (XEN) CPU0 CMCI LVT vector (0xf1) already installed
> (XEN) Finishing wakeup from ACPI S3 state.
> (XEN) Enabling non-boot CPUs  ...
> (XEN) Platform timer appears to have unexpectedly wrapped 3 times.
> (XEN) ----[ Xen-4.17-rc  x86_64  debug=y  Tainted:   C    ]----
> (XEN) CPU:    0
> (XEN) RIP:    e008:[<ffff82d040250c7e>] sched_migrate_timers+0x4d/0xc9
> (XEN) RFLAGS: 0000000000010202   CONTEXT: hypervisor
> (XEN) rax: ffff82d0405c5298   rbx: 0000000000000000   rcx: 0000000000000001
> (XEN) rdx: 0000003211219000   rsi: 0000000000000004   rdi: 0000000000000001
> (XEN) rbp: ffff830256227d20   rsp: ffff830256227d18   r8:  ffff82d0405d2f78
> (XEN) r9:  ffff82d0405ef8a0   r10: 00000000ffffffff   r11: 00000000002191c0
> (XEN) r12: 0000000000000000   r13: 0000000000000001   r14: 0000000000000004
> (XEN) r15: 0000000000000000   cr0: 000000008005003b   cr4: 00000000003526e0
> (XEN) cr3: 0000000049677000   cr2: 0000000000000070
> (XEN) fsb: 0000000000000000   gsb: 0000000000000000   gss: 0000000000000000
> (XEN) ds: 0000   es: 0000   fs: 0000   gs: 0000   ss: 0000   cs: e008
> (XEN) Xen code around <ffff82d040250c7e> (sched_migrate_timers+0x4d/0xc9):
> (XEN)  48 8b 14 ca 48 8b 1c 02 <39> 7b 70 74 51 48 8d 05 56 34 37 00 48 89 e2 48
> (XEN) Xen stack trace from rsp=ffff830256227d18:
> (XEN)    0000000000000001 ffff830256227d58 ffff82d04023f1a0 ffff82d04047a308
> (XEN)    ffff82d04047a300 ffff82d04047a060 0000000000000004 0000000000000000
> (XEN)    ffff830256227da0 ffff82d040226a04 0000000000000000 0000000000000001
> (XEN)    0000000000000001 0000000000000000 0000000000000001 ffff830256227fff
> (XEN)    ffff82d04046c520 ffff830256227db8 ffff82d040207e75 0000000000000001
> (XEN)    ffff830256227de0 ffff82d040208243 ffff82d04047a220 0000000000000001
> (XEN)    0000000000000010 ffff830256227e18 ffff82d040208428 0000000000000200
> (XEN)    0000000000000000 0000000000000003 ffff830256227ef8 ffff82d0405de6c0
> (XEN)    ffff830256227e48 ffff82d04027a2df ffff830251491490 ffff830251757000
> (XEN)    0000000000000000 0000000000000000 ffff830256227e68 ffff82d040209c73
> (XEN)    ffff8302517571b8 ffff82d040479618 ffff830256227e88 ffff82d04022e484
> (XEN)    ffff82d0405c41a0 ffff82d0405c41b0 ffff830256227eb8 ffff82d04022e76e
> (XEN)    0000000000000000 0000000000007fff ffff82d0405caf00 ffff82d0405c41b0
> (XEN)    ffff830256227ef0 ffff82d0402f455d ffff82d0402f44e5 ffff830251757000
> (XEN)    ffff830256227ef8 ffff8302517f5000 0000000000000000 ffff830256227e18
> (XEN)    0000000000000000 ffffc90040b43d60 0000000000003403 0000000000000000
> (XEN)    0000000000000003 ffffffff82e37868 0000000000000246 0000000000000003
> (XEN)    0000000000003403 0000000000003403 0000000000000000 ffffffff81e4a0ea
> (XEN)    0000000000003403 0000000000000010 deadbeefdeadf00d 0000010000000000
> (XEN)    ffffffff81e4a0ea 000000000000e033 0000000000000246 ffffc90040b43c30
> (XEN) Xen call trace:
> (XEN)    [<ffff82d040250c7e>] R sched_migrate_timers+0x4d/0xc9
> (XEN)    [<ffff82d04023f1a0>] F cpupool.c#cpu_callback+0x13d/0x47e
> (XEN)    [<ffff82d040226a04>] F notifier_call_chain+0x6c/0x96
> (XEN)    [<ffff82d040207e75>] F cpu.c#cpu_notifier_call_chain+0x1b/0x36
> (XEN)    [<ffff82d040208243>] F cpu_up+0xaf/0xc8
> (XEN)    [<ffff82d040208428>] F enable_nonboot_cpus+0x7b/0x1ef
> (XEN)    [<ffff82d04027a2df>] F power.c#enter_state_helper+0x156/0x5dc
> (XEN)    [<ffff82d040209c73>] F domain.c#continue_hypercall_tasklet_handler+0x50/0xbf
> (XEN)    [<ffff82d04022e484>] F tasklet.c#do_tasklet_work+0x7b/0xac
> (XEN)    [<ffff82d04022e76e>] F do_tasklet+0x58/0x8a
> (XEN)    [<ffff82d0402f455d>] F domain.c#idle_loop+0x78/0xe6
> (XEN)
> (XEN) Pagetable walk from 0000000000000070:
> (XEN)  L4[0x000] = 00000002517fb063 ffffffffffffffff
> (XEN)  L3[0x000] = 00000002517fa063 ffffffffffffffff
> (XEN)  L2[0x000] = 00000002517f9063 ffffffffffffffff
> (XEN)  L1[0x000] = 0000000000000000 ffffffffffffffff
> (XEN)
> (XEN) ****************************************
> (XEN) Panic on CPU 0:
> (XEN) FATAL PAGE FAULT
> (XEN) [error_code=0000]
> (XEN) Faulting linear address: 0000000000000070
> (XEN) ****************************************
> 

This is very weird. The data suggests that the scheduling resource pointer
for cpu 1 was NULL, but I can't see how this can be the case without causing
similar crashes without this patch.

Are there any additional patches related to cpu on/offlining or suspend/resume
in the hypervisor?


Juergen
Andrew Cooper Oct. 28, 2022, 10:12 a.m. UTC | #3
On 28/10/2022 11:08, Juergen Gross wrote:
> On 27.10.22 20:13, Marek Marczykowski-Górecki wrote:
>
> This is very weird. The data suggests that the scheduling resource
> pointer
> for cpu 1 was NULL, but I can't see how this can be the case without
> causing
> similar crashes without this patch.
>
> Are there any additional patches related to cpu on/offlining or
> suspend/resume
> in the hypervisor?

QubesOS runs with smt=0 by default.  Siblings ought to be parked at this
point.

~Andrew
Marek Marczykowski-Górecki Oct. 28, 2022, 10:22 a.m. UTC | #4
On Fri, Oct 28, 2022 at 10:12:36AM +0000, Andrew Cooper wrote:
> On 28/10/2022 11:08, Juergen Gross wrote:
> > On 27.10.22 20:13, Marek Marczykowski-Górecki wrote:
> >
> > This is very weird. The data suggests that the scheduling resource
> > pointer
> > for cpu 1 was NULL, but I can't see how this can be the case without
> > causing
> > similar crashes without this patch.
> >
> > Are there any additional patches related to cpu on/offlining or
> > suspend/resume
> > in the hypervisor?

No such patches, it was this:
https://github.com/marmarek/xen/commits/master-credit-timers

> QubesOS runs with smt=0 by default.  Siblings ought to be parked at this
> point.

Yes, indeed this test was with smt=off.
Jürgen Groß Oct. 28, 2022, 10:56 a.m. UTC | #5
On 28.10.22 12:22, Marek Marczykowski-Górecki wrote:
> On Fri, Oct 28, 2022 at 10:12:36AM +0000, Andrew Cooper wrote:
>> On 28/10/2022 11:08, Juergen Gross wrote:
>>> On 27.10.22 20:13, Marek Marczykowski-Górecki wrote:
>>>
>>> This is very weird. The data suggests that the scheduling resource
>>> pointer
>>> for cpu 1 was NULL, but I can't see how this can be the case without
>>> causing
>>> similar crashes without this patch.
>>>
>>> Are there any additional patches related to cpu on/offlining or
>>> suspend/resume
>>> in the hypervisor?
> 
> No such patches, it was this:
> https://github.com/marmarek/xen/commits/master-credit-timers
> 
>> QubesOS runs with smt=0 by default.  Siblings ought to be parked at this
>> point.
> 
> Yes, indeed this test was with smt=off.

Ah, this is subtle. With smt=0 the siblings are parked, but the CPU_ONLINE
notifiers are still called for them, so sched_migrate_timers() runs for cpus
which have no scheduling resource assigned, resulting in the above error.

Preparing V2 of the patch.


Juergen
diff mbox series

Patch

diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index 23fa6845a8..142d03ade5 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -1284,6 +1284,29 @@  static int cpu_disable_scheduler_check(unsigned int cpu)
     return 0;
 }
 
+/*
+ * Called after a cpu has come up again in a suspend/resume cycle.
+ * Migrate all timers for this cpu (they have been migrated to cpu 0 when the
+ * cpu was going down). Only timers related to a physical cpu are migrated,
+ * not the ones related to a vcpu or domain. A parked cpu (e.g. a sibling
+ * with smt=0) has no scheduling resource assigned and is skipped.
+ */
+void sched_migrate_timers(unsigned int cpu)
+{
+    struct sched_resource *sr;
+
+    rcu_read_lock(&sched_res_rculock);
+
+    sr = get_sched_res(cpu);
+    if ( sr && sr->master_cpu == cpu )
+    {
+        migrate_timer(&sr->s_timer, cpu);
+        sched_move_timers(sr->scheduler, sr);
+    }
+
+    rcu_read_unlock(&sched_res_rculock);
+}
+
 /*
  * In general, this must be called with the scheduler lock held, because the
  * adjust_affinity hook may want to modify the vCPU state. However, when the
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index b2c6f520c3..bdf6030ab0 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -1035,6 +1035,8 @@  static int cf_check cpu_callback(
     case CPU_ONLINE:
         if ( system_state <= SYS_STATE_active )
             rc = cpupool_cpu_add(cpu);
+        else
+            sched_migrate_timers(cpu);
         break;
     case CPU_DOWN_PREPARE:
         /* Suspend/Resume don't change assignments of cpus to cpupools. */
diff --git a/xen/common/sched/credit.c b/xen/common/sched/credit.c
index 47945c2834..f2cd3d9da3 100644
--- a/xen/common/sched/credit.c
+++ b/xen/common/sched/credit.c
@@ -614,6 +614,18 @@  init_pdata(struct csched_private *prv, struct csched_pcpu *spc, int cpu)
     spc->nr_runnable = 0;
 }
 
+/*
+ * move_timers hook of the credit scheduler: migrate the per-cpu tick timer
+ * of sr (and, if this resource owns it, the scheduler-wide master ticker)
+ * to sr's master cpu.
+ */
+static void cf_check
+csched_move_timers(const struct scheduler *ops, struct sched_resource *sr)
+{
+    struct csched_private *prv = CSCHED_PRIV(ops);
+    struct csched_pcpu *spc = sr->sched_priv;
+
+    /* The master ticker exists once per scheduler; only its owner moves it. */
+    if ( sr->master_cpu == prv->master )
+        migrate_timer(&prv->master_ticker, prv->master);
+
+    migrate_timer(&spc->ticker, sr->master_cpu);
+}
+
 /* Change the scheduler of cpu to us (Credit). */
 static spinlock_t *cf_check
 csched_switch_sched(struct scheduler *new_ops, unsigned int cpu,
@@ -2264,6 +2276,7 @@  static const struct scheduler sched_credit_def = {
     .switch_sched   = csched_switch_sched,
     .alloc_domdata  = csched_alloc_domdata,
     .free_domdata   = csched_free_domdata,
+    .move_timers    = csched_move_timers,
 };
 
 REGISTER_SCHEDULER(sched_credit_def);
diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
index 0126a4bb9e..0527a8c70d 100644
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -331,6 +331,8 @@  struct scheduler {
                                     struct xen_sysctl_scheduler_op *);
     void         (*dump_settings)  (const struct scheduler *);
     void         (*dump_cpu_state) (const struct scheduler *, int);
+    void         (*move_timers)    (const struct scheduler *,
+                                    struct sched_resource *);
 };
 
 static inline int sched_init(struct scheduler *s)
@@ -485,6 +487,13 @@  static inline int sched_adjust_cpupool(const struct scheduler *s,
     return s->adjust_global ? s->adjust_global(s, op) : 0;
 }
 
+/*
+ * Ask the scheduler to migrate its internal timers to sr's master cpu.
+ * The move_timers hook is optional; schedulers without per-cpu timers
+ * (beyond the generic s_timer) need not implement it.
+ */
+static inline void sched_move_timers(const struct scheduler *s,
+                                     struct sched_resource *sr)
+{
+    if ( s->move_timers )
+        s->move_timers(s, sr);
+}
+
 static inline void sched_unit_pause_nosync(const struct sched_unit *unit)
 {
     struct vcpu *v;
@@ -622,6 +631,7 @@  struct cpu_rm_data *alloc_cpu_rm_data(unsigned int cpu, bool aff_alloc);
 void free_cpu_rm_data(struct cpu_rm_data *mem, unsigned int cpu);
 int schedule_cpu_rm(unsigned int cpu, struct cpu_rm_data *mem);
 int sched_move_domain(struct domain *d, struct cpupool *c);
+void sched_migrate_timers(unsigned int cpu);
 struct cpupool *cpupool_get_by_id(unsigned int poolid);
 void cpupool_put(struct cpupool *pool);
 int cpupool_add_domain(struct domain *d, unsigned int poolid);
diff --git a/xen/common/sched/rt.c b/xen/common/sched/rt.c
index 1f8d074884..d443cd5831 100644
--- a/xen/common/sched/rt.c
+++ b/xen/common/sched/rt.c
@@ -750,6 +750,27 @@  rt_switch_sched(struct scheduler *new_ops, unsigned int cpu,
     return &prv->lock;
 }
 
+/*
+ * Move prv->repl_timer away from old_cpu to some other cpu still available
+ * to this scheduler's cpupool, or kill the timer if no such cpu remains.
+ * Caller is expected to hold prv->lock (both call sites take it).
+ */
+static void move_repl_timer(struct rt_private *prv, unsigned int old_cpu)
+{
+    cpumask_t *online = get_sched_res(old_cpu)->cpupool->res_valid;
+    unsigned int new_cpu = cpumask_cycle(old_cpu, online);
+
+    /*
+     * Make sure the timer run on one of the cpus that are still available
+     * to this scheduler. If there aren't any left, it means it's the time
+     * to just kill it.
+     */
+    if ( new_cpu >= nr_cpu_ids )
+    {
+        kill_timer(&prv->repl_timer);
+        dprintk(XENLOG_DEBUG, "RTDS: timer killed on cpu %d\n", old_cpu);
+    }
+    else
+    {
+        migrate_timer(&prv->repl_timer, new_cpu);
+    }
+}
+
 static void cf_check
 rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
 {
@@ -759,25 +780,25 @@  rt_deinit_pdata(const struct scheduler *ops, void *pcpu, int cpu)
     spin_lock_irqsave(&prv->lock, flags);
 
     if ( prv->repl_timer.cpu == cpu )
-    {
-        cpumask_t *online = get_sched_res(cpu)->cpupool->res_valid;
-        unsigned int new_cpu = cpumask_cycle(cpu, online);
+        move_repl_timer(prv, cpu);
 
-        /*
-         * Make sure the timer run on one of the cpus that are still available
-         * to this scheduler. If there aren't any left, it means it's the time
-         * to just kill it.
-         */
-        if ( new_cpu >= nr_cpu_ids )
-        {
-            kill_timer(&prv->repl_timer);
-            dprintk(XENLOG_DEBUG, "RTDS: timer killed on cpu %d\n", cpu);
-        }
-        else
-        {
-            migrate_timer(&prv->repl_timer, new_cpu);
-        }
-    }
+    spin_unlock_irqrestore(&prv->lock, flags);
+}
+
+/*
+ * move_timers hook of the RTDS scheduler: if the replenishment timer is
+ * currently placed on a cpu no longer valid for sr's cpupool, move it to a
+ * cpu that is (or kill it, should none be left).
+ */
+static void cf_check
+rt_move_timers(const struct scheduler *ops, struct sched_resource *sr)
+{
+    unsigned long flags;
+    struct rt_private *prv = rt_priv(ops);
+    unsigned int old_cpu;
+
+    spin_lock_irqsave(&prv->lock, flags);
+
+    old_cpu = prv->repl_timer.cpu;
+    /* Don't touch a timer which was never initialized or already killed. */
+    if ( prv->repl_timer.status != TIMER_STATUS_invalid &&
+         prv->repl_timer.status != TIMER_STATUS_killed &&
+         !cpumask_test_cpu(old_cpu, sr->cpupool->res_valid) )
+        move_repl_timer(prv, old_cpu);
+
+    spin_unlock_irqrestore(&prv->lock, flags);
+}
@@ -1561,6 +1582,7 @@  static const struct scheduler sched_rtds_def = {
     .sleep          = rt_unit_sleep,
     .wake           = rt_unit_wake,
     .context_saved  = rt_context_saved,
+    .move_timers    = rt_move_timers,
 };
 
 REGISTER_SCHEDULER(sched_rtds_def);