| Message ID | 20221209022305.321149-1-joel@joelfernandes.org (mailing list archive) |
| --- | --- |
| State | Superseded |
| Series | [v2,1/2] locktorture: Allow non-rtmutex lock types to be boosted |
On Thu, Dec 8, 2022 at 9:23 PM Joel Fernandes (Google) <joel@joelfernandes.org> wrote:
>
> Currently RT boosting is only done for rtmutex_lock, however with proxy
> execution, we also have the mutex_lock participating in priorities. To
> exercise the testing better, add RT boosting to other lock testing types
> as well, using a new knob (rt_boost).
>
> Tested with boot parameters:
> locktorture.torture_type=mutex_lock
> locktorture.onoff_interval=1
> locktorture.nwriters_stress=8
> locktorture.stutter=0
> locktorture.rt_boost=1
> locktorture.rt_boost_factor=1
> locktorture.nlocks=3
>
> For the rtmutex test, rt_boost is always enabled even if disabling is
> requested.

Sorry, these two lines are outdated. I hope you can just delete them,
or I can respin and resend later.

thanks,

- Joel

> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> ---
>  kernel/locking/locktorture.c | 99 ++++++++++++++++++++----------------
>  1 file changed, 56 insertions(+), 43 deletions(-)
> [...]
On Fri, 09 Dec 2022, Joel Fernandes (Google) wrote:

>Currently RT boosting is only done for rtmutex_lock, however with proxy
>execution, we also have the mutex_lock participating in priorities. To
>exercise the testing better, add RT boosting to other lock testing types
>as well, using a new knob (rt_boost).

No particular objection to the patches, but shouldn't these go as part
of the next iteration of the PE series?

Thanks,
Davidlohr
> On Dec 8, 2022, at 10:50 PM, Davidlohr Bueso <dave@stgolabs.net> wrote:
>
> On Fri, 09 Dec 2022, Joel Fernandes (Google) wrote:
>
>> Currently RT boosting is only done for rtmutex_lock, however with proxy
>> execution, we also have the mutex_lock participating in priorities. To
>> exercise the testing better, add RT boosting to other lock testing types
>> as well, using a new knob (rt_boost).
>
> No particular objection to the patches, but shouldn't these go as part
> of the next iteration of the PE series?

Hey Davidlohr,

Nice to hear from you. Paul was interested in these for some things he
is looking at, and also as general cleanup ;)

Thanks.

> Thanks,
> Davidlohr
On Fri, 09 Dec 2022, Joel Fernandes (Google) wrote:

>Currently RT boosting is only done for rtmutex_lock, however with proxy
>execution, we also have the mutex_lock participating in priorities. To
>exercise the testing better, add RT boosting to other lock testing types
>as well, using a new knob (rt_boost).
>
>Tested with boot parameters:
>locktorture.torture_type=mutex_lock
>locktorture.onoff_interval=1
>locktorture.nwriters_stress=8
>locktorture.stutter=0
>locktorture.rt_boost=1
>locktorture.rt_boost_factor=1
>locktorture.nlocks=3
>
>For the rtmutex test, rt_boost is always enabled even if disabling is
>requested.
>
>Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
>---
> kernel/locking/locktorture.c | 99 ++++++++++++++++++++----------------
> 1 file changed, 56 insertions(+), 43 deletions(-)
>
>diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
>index bc3557677eed..8968fd65a90d 100644
>--- a/kernel/locking/locktorture.c
>+++ b/kernel/locking/locktorture.c
>@@ -46,6 +46,8 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
> torture_param(int, stat_interval, 60,
>              "Number of seconds between stats printk()s");
> torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
>+torture_param(int, rt_boost, 2,
>+             "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");

Sorry if this was already discussed, but what would be the intent of
option 2 without PE?

Thanks,
Davidlohr
On Fri, Dec 9, 2022 at 4:59 PM Davidlohr Bueso <dave@stgolabs.net> wrote:
>
> On Fri, 09 Dec 2022, Joel Fernandes (Google) wrote:
>
> >Currently RT boosting is only done for rtmutex_lock, however with proxy
> >execution, we also have the mutex_lock participating in priorities. To
> >exercise the testing better, add RT boosting to other lock testing types
> >as well, using a new knob (rt_boost).
> >
> [...]
> >
> >+torture_param(int, rt_boost, 2,
> >+             "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
>
> Sorry if this was already discussed, but what would be the intent of
> option 2 without PE?

Np, yes it was discussed. I would like to have it even without PE, in
order to exercise more scheduler paths; it is not an invasive change in
any case. We have had issues with race conditions in the scheduler
paths in the past that I would prefer the test to cover. The scheduler
has become incredibly complex over time with new features such as core
scheduling, and heisenbugs are showing up. For instance, I am still
fighting an old 4.19 kernel issue which is likely scheduler related.

Thanks.
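[Editorial note: to make the knob semantics under discussion concrete, here is a minimal userspace sketch, plain C rather than the kernel code itself. The names `task_boost_generic` and `task_boost_rtmutex` are illustrative stand-ins for the patch's `torture_rt_boost()` and `torture_rt_boost_rtmutex()`, and `boost()` stands in for `__torture_rt_boost()`.]

#include <stdio.h>

static int rt_boost = 2; /* 0=disable, 1=rtmutex only, 2=all lock types (default) */

/* Stand-in for __torture_rt_boost(); just report that boosting would happen. */
static void boost(const char *type)
{
	printf("  %s: boosted\n", type);
}

/* Mirrors torture_rt_boost(): used by every non-rtmutex lock type. */
static void task_boost_generic(const char *type)
{
	if (rt_boost != 2)
		return;
	boost(type);
}

/* Mirrors torture_rt_boost_rtmutex(): rtmutex boosts unless fully disabled. */
static void task_boost_rtmutex(void)
{
	if (!rt_boost)
		return;
	boost("rtmutex_lock");
}

int main(void)
{
	for (rt_boost = 0; rt_boost <= 2; rt_boost++) {
		printf("rt_boost=%d:\n", rt_boost);
		task_boost_generic("mutex_lock");
		task_boost_rtmutex();
	}
	return 0;
}

Running this prints no boosts for rt_boost=0, only the rtmutex boost for rt_boost=1, and both for rt_boost=2, which is exactly the dispatch split between the two wrappers in the patch.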
On Fri, Dec 09, 2022 at 02:23:04AM +0000, Joel Fernandes (Google) wrote:
> Currently RT boosting is only done for rtmutex_lock, however with proxy
> execution, we also have the mutex_lock participating in priorities. To
> exercise the testing better, add RT boosting to other lock testing types
> as well, using a new knob (rt_boost).
>
> Tested with boot parameters:
> locktorture.torture_type=mutex_lock
> locktorture.onoff_interval=1
> locktorture.nwriters_stress=8
> locktorture.stutter=0
> locktorture.rt_boost=1
> locktorture.rt_boost_factor=1
> locktorture.nlocks=3
>
> For the rtmutex test, rt_boost is always enabled even if disabling is
> requested.
>
> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>

Nice, thank you!

Except that "git am -3" doesn't know about the commit on which this
is based. Could you please rebase onto the -rcu tree's "dev" branch?

Though there is nothing touching kernel/locking/locktorture.c in
-rcu at the moment, so I confess some curiosity as to exactly what
these patches are based on. ;-)

							Thanx, Paul

> ---
>  kernel/locking/locktorture.c | 99 ++++++++++++++++++++----------------
>  1 file changed, 56 insertions(+), 43 deletions(-)
> [...]
On Tue, Dec 13, 2022 at 3:24 PM Paul E. McKenney <paulmck@kernel.org> wrote:
>
> On Fri, Dec 09, 2022 at 02:23:04AM +0000, Joel Fernandes (Google) wrote:
> > Currently RT boosting is only done for rtmutex_lock, however with proxy
> > execution, we also have the mutex_lock participating in priorities. To
> > exercise the testing better, add RT boosting to other lock testing types
> > as well, using a new knob (rt_boost).
> >
> > Tested with boot parameters:
> > locktorture.torture_type=mutex_lock
> > locktorture.onoff_interval=1
> > locktorture.nwriters_stress=8
> > locktorture.stutter=0
> > locktorture.rt_boost=1
> > locktorture.rt_boost_factor=1
> > locktorture.nlocks=3
> >
> > For the rtmutex test, rt_boost is always enabled even if disabling is
> > requested.
> >
> > Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
>
> Nice, thank you!
>
> Except that "git am -3" doesn't know about the commit on which this
> is based. Could you please rebase onto the -rcu tree's "dev" branch?
>
> Though there is nothing touching kernel/locking/locktorture.c in
> -rcu at the moment, so I confess some curiosity as to exactly what
> these patches are based on. ;-)

Ah, I am not sure any more as it has been some time, but I believe it
was v5.15. My bad; I have rebased it onto the rcu/dev branch and resent
it just now :)

Thank you!

- Joel
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index bc3557677eed..8968fd65a90d 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -46,6 +46,8 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
 torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
+torture_param(int, rt_boost, 2,
+	      "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
 torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");
 torture_param(int, nlocks, 1,
@@ -129,15 +131,49 @@ static void torture_lock_busted_write_unlock(int tid __maybe_unused)
	/* BUGGY, do not use in real life!!! */
 }
 
-static void torture_boost_dummy(struct torture_random_state *trsp)
+static void __torture_rt_boost(struct torture_random_state *trsp)
 {
-	/* Only rtmutexes care about priority */
+	const unsigned int factor = 50000;	/* yes, quite arbitrary */
+
+	if (!rt_task(current)) {
+		/*
+		 * Boost priority once every ~50k operations. When the
+		 * task tries to take the lock, the rtmutex it will account
+		 * for the new priority, and do any corresponding pi-dance.
+		 */
+		if (trsp && !(torture_random(trsp) %
+			      (cxt.nrealwriters_stress * factor))) {
+			sched_set_fifo(current);
+		} else /* common case, do nothing */
+			return;
+	} else {
+		/*
+		 * The task will remain boosted for another ~500k operations,
+		 * then restored back to its original prio, and so forth.
+		 *
+		 * When @trsp is nil, we want to force-reset the task for
+		 * stopping the kthread.
+		 */
+		if (!trsp || !(torture_random(trsp) %
+			       (cxt.nrealwriters_stress * factor * 2))) {
+			sched_set_normal(current, 0);
+		} else /* common case, do nothing */
+			return;
+	}
+}
+
+static void torture_rt_boost(struct torture_random_state *trsp)
+{
+	if (rt_boost != 2)
+		return;
+
+	__torture_rt_boost(trsp);
 }
 
 static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
-	.task_boost     = torture_boost_dummy,
+	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
@@ -181,7 +217,7 @@ __releases(torture_spinlock)
 static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
-	.task_boost     = torture_boost_dummy,
+	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
@@ -208,7 +244,7 @@ __releases(torture_spinlock)
 static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
-	.task_boost     = torture_boost_dummy,
+	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock       = NULL,
	.read_delay     = NULL,
@@ -277,7 +313,7 @@ __releases(torture_rwlock)
 static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
-	.task_boost     = torture_boost_dummy,
+	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock       = torture_rwlock_read_lock,
	.read_delay     = torture_rwlock_read_delay,
@@ -320,7 +356,7 @@ __releases(torture_rwlock)
 static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
-	.task_boost     = torture_boost_dummy,
+	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
@@ -362,7 +398,7 @@ __releases(torture_mutex)
 static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
-	.task_boost     = torture_boost_dummy,
+	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
@@ -460,7 +496,7 @@ static struct lock_torture_ops ww_mutex_lock_ops = {
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
-	.task_boost     = torture_boost_dummy,
+	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
@@ -478,37 +514,6 @@ __acquires(torture_rtmutex)
	return 0;
 }
 
-static void torture_rtmutex_boost(struct torture_random_state *trsp)
-{
-	const unsigned int factor = 50000;	/* yes, quite arbitrary */
-
-	if (!rt_task(current)) {
-		/*
-		 * Boost priority once every ~50k operations. When the
-		 * task tries to take the lock, the rtmutex it will account
-		 * for the new priority, and do any corresponding pi-dance.
-		 */
-		if (trsp && !(torture_random(trsp) %
-			      (cxt.nrealwriters_stress * factor))) {
-			sched_set_fifo(current);
-		} else /* common case, do nothing */
-			return;
-	} else {
-		/*
-		 * The task will remain boosted for another ~500k operations,
-		 * then restored back to its original prio, and so forth.
-		 *
-		 * When @trsp is nil, we want to force-reset the task for
-		 * stopping the kthread.
-		 */
-		if (!trsp || !(torture_random(trsp) %
-			       (cxt.nrealwriters_stress * factor * 2))) {
-			sched_set_normal(current, 0);
-		} else /* common case, do nothing */
-			return;
-	}
-}
-
 static void torture_rtmutex_delay(struct torture_random_state *trsp)
 {
	const unsigned long shortdelay_us = 2;
@@ -534,10 +539,18 @@ __releases(torture_rtmutex)
	rt_mutex_unlock(&torture_rtmutex);
 }
 
+static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
+{
+	if (!rt_boost)
+		return;
+
+	__torture_rt_boost(trsp);
+}
+
 static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
-	.task_boost     = torture_rtmutex_boost,
+	.task_boost     = torture_rt_boost_rtmutex,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock       = NULL,
	.read_delay     = NULL,
@@ -604,7 +617,7 @@ __releases(torture_rwsem)
 static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
-	.task_boost     = torture_boost_dummy,
+	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_rwsem_up_write,
	.readlock       = torture_rwsem_down_read,
	.read_delay     = torture_rwsem_read_delay,
@@ -656,7 +669,7 @@ static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
-	.task_boost     = torture_boost_dummy,
+	.task_boost     = torture_rt_boost,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
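[Editorial note: the "~50k operations" arithmetic in the comments above can be sanity-checked in userspace. Below is a minimal, self-contained C sketch, not kernel code: rand() stands in for torture_random(), and nrealwriters = 8 is an assumed value taken from the test parameters in the commit message.]

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const unsigned int factor = 50000;     /* same constant as the patch */
	const unsigned int nrealwriters = 8;   /* assumed, matches the test setup */
	const unsigned long ops = 100000000UL; /* simulated lock operations */
	unsigned long boosts = 0;

	srand(42);
	for (unsigned long i = 0; i < ops; i++) {
		/*
		 * The gate from __torture_rt_boost(): fires only when the
		 * random draw is an exact multiple of nrealwriters * factor,
		 * i.e. with probability 1 / (nrealwriters * factor) per call.
		 * (Assumes RAND_MAX is large, as on glibc.)
		 */
		if (!(rand() % (nrealwriters * factor)))
			boosts++;
	}
	/*
	 * Expect roughly ops / (nrealwriters * factor) boosts: scaling the
	 * divisor by the writer count keeps the system-wide boost rate
	 * roughly constant no matter how many writers are running.
	 */
	printf("%lu boosts in %lu ops (expected ~%lu)\n",
	       boosts, ops, ops / (nrealwriters * factor));
	return 0;
}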
Currently RT boosting is only done for rtmutex_lock, however with proxy
execution, we also have the mutex_lock participating in priorities. To
exercise the testing better, add RT boosting to other lock testing types
as well, using a new knob (rt_boost).

Tested with boot parameters:
locktorture.torture_type=mutex_lock
locktorture.onoff_interval=1
locktorture.nwriters_stress=8
locktorture.stutter=0
locktorture.rt_boost=1
locktorture.rt_boost_factor=1
locktorture.nlocks=3

For the rtmutex test, rt_boost is always enabled even if disabling is
requested.

Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
---
 kernel/locking/locktorture.c | 99 ++++++++++++++++++++----------------
 1 file changed, 56 insertions(+), 43 deletions(-)