Message ID | 820a185b6765d6246ac34f612faedeb35189487c.1596526941.git.yangdongdong@xiaomi.com (mailing list archive) |
---|---|
State | Superseded, archived |
Headers | show |
Series | [v4] sched: Provide USF for the portable equipment. | expand |
On Tue, Aug 04, 2020 at 03:50:35PM +0800, Dongdong Yang wrote: Comments on code stuff only, not if this is actually a valid thing to be doing at all: > --- /dev/null > +++ b/kernel/sched/usf.c > @@ -0,0 +1,294 @@ > +// SPDX-License-Identifier: GPL-2.0 > +/* > + * Copyright (C) 2020 XiaoMi Inc. > + * Author: Yang Dongdong <yangdongdong@xiaomi.com> > + * This program is free software; you can redistribute it and/or modify > + * it under the terms of the GNU General Public License version 2 as > + * published by the Free Software Foundation. > + * > + * This program is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. > + * See http://www.gnu.org/licenses/gpl-2.0.html for more details. No need for the two paragraph "boiler plate" license text now that you have a SPDX line, please remove them. > + */ > + > +#include <linux/module.h> > +#include <linux/init.h> > +#include <linux/platform_device.h> > +#include <linux/kthread.h> > +#include <linux/cpu.h> > +#include <linux/sysfs.h> > +#include <linux/kthread.h> > +#include <linux/kobject.h> > +#include <linux/module.h> > +#include <linux/kernel.h> > +#include <linux/init.h> > +#include <linux/kallsyms.h> > +#include <linux/fb.h> > +#include <linux/notifier.h> > +#include <trace/events/sched.h> > +#include "sched.h" > + > +#define BOOST_MIN_V -100 > +#define BOOST_MAX_V 100 > +#define LEVEL_TOP 3 > + > +#define USF_TAG "[usf_sched]" Please pr_fmt instead. 
> + > +DEFINE_PER_CPU(unsigned long[PID_MAX_DEFAULT], task_hist_nivcsw); > + > +static struct { > + bool is_sched_usf_enabled; > + bool is_screen_on; > + int sysctl_sched_usf_up_l0; > + int sysctl_sched_usf_down; > + int sysctl_sched_usf_non_ux; > + int usf_up_l0; > + int usf_down; > + int usf_non_ux; > +} usf_vdev; > + > +void adjust_task_pred_demand(int cpuid, > + unsigned long *util, > + struct rq *rq) > +{ > + /* sysctl_sched_latency/sysctl_sched_min_granularity */ > + u32 bl_sw_num = 3; > + > + if (!usf_vdev.is_sched_usf_enabled || !rq || !rq->curr || > + (rq->curr->pid >= PID_MAX_DEFAULT)) > + return; > + > + if (usf_vdev.is_screen_on) { > + if (rq->curr->nivcsw > > + (per_cpu(task_hist_nivcsw, cpuid)[rq->curr->pid] > + + bl_sw_num + 1)) { > + (*util) += (*util) >> usf_vdev.usf_up_l0; > + } else if (rq->curr->nivcsw < > + (per_cpu(task_hist_nivcsw, cpuid)[rq->curr->pid] > + + bl_sw_num - 1) && (rq->nr_running < bl_sw_num)) { > + (*util) >>= usf_vdev.usf_down; > + } > + per_cpu(task_hist_nivcsw, cpuid)[rq->curr->pid] = > + rq->curr->nivcsw; > + } else if (rq->curr->mm) { > + (*util) >>= usf_vdev.usf_non_ux; > + } > + > + trace_sched_usf_adjust_utils(cpuid, usf_vdev.usf_up_l0, > + usf_vdev.usf_down, > + usf_vdev.usf_non_ux, *util); > +} > + > +static int usf_lcd_notifier(struct notifier_block *nb, > + unsigned long val, void *data) > +{ > + struct fb_event *evdata = data; > + unsigned int blank; > + > + if (!evdata) > + return 0; > + > + if (val != FB_EVENT_BLANK) > + return 0; > + > + if (evdata->data && val == FB_EVENT_BLANK) { > + blank = *(int *)(evdata->data); > + > + switch (blank) { > + case FB_BLANK_POWERDOWN: > + usf_vdev.is_screen_on = false; > + if (usf_vdev.sysctl_sched_usf_non_ux != 0) > + static_branch_enable(&adjust_task_pred_set); > + else > + static_branch_disable(&adjust_task_pred_set); > + > + break; > + > + case FB_BLANK_UNBLANK: > + usf_vdev.is_screen_on = true; > + if (usf_vdev.sysctl_sched_usf_up_l0 != 0 || > + 
usf_vdev.sysctl_sched_usf_down != 0) > + static_branch_enable(&adjust_task_pred_set); > + else > + static_branch_disable(&adjust_task_pred_set); > + break; > + default: > + break; > + } > + > + usf_vdev.is_sched_usf_enabled = true; > + pr_info("%s : usf_vdev.is_screen_on:%b\n", > + __func__, usf_vdev.is_screen_on); > + } > + return NOTIFY_OK; > +} > + > +static struct notifier_block usf_lcd_nb = { > + .notifier_call = usf_lcd_notifier, > + .priority = INT_MAX, > +}; > + > +static ssize_t store_sched_usf_up_l0_r(struct kobject *kobj, > + struct kobj_attribute *attr, > + const char *buf, size_t count) > +{ > + int val = 0; > + int ret = 0; > + > + ret = kstrtoint(buf, 0, &val); > + if (ret) > + return ret; > + > + if (val == 0) { > + usf_vdev.sysctl_sched_usf_up_l0 = val; > + usf_vdev.usf_up_l0 = 0; > + } else if ((val > 0) && (val <= BOOST_MAX_V)) { > + usf_vdev.sysctl_sched_usf_up_l0 = val; > + usf_vdev.usf_up_l0 = LEVEL_TOP - > + DIV_ROUND_UP(val, BOOST_MAX_V / 2); > + ret = count; > + } else { > + pr_err(USF_TAG "%d should fall into [%d %d]", > + val, 0, BOOST_MAX_V); > + ret = -EINVAL; > + } > + if ((usf_vdev.sysctl_sched_usf_up_l0 == 0) && > + (usf_vdev.sysctl_sched_usf_down == 0)) > + static_branch_disable(&adjust_task_pred_set); > + else > + static_branch_enable(&adjust_task_pred_set); > + > + return ret; > +} > + > +static ssize_t store_sched_usf_down_r(struct kobject *kobj, > + struct kobj_attribute *attr, > + const char *buf, size_t count) > +{ > + int val = 0; > + int ret = 0; > + > + ret = kstrtoint(buf, 0, &val); > + if (ret) > + return ret; > + > + if ((val >= BOOST_MIN_V) && (val <= 0)) { > + usf_vdev.sysctl_sched_usf_down = val; > + usf_vdev.usf_down = DIV_ROUND_UP(-val, -BOOST_MIN_V / 2); > + ret = count; > + } else { > + pr_err(USF_TAG "%d should fall into [%d %d]", > + val, BOOST_MIN_V, 0); > + ret = -EINVAL; > + } > + if ((usf_vdev.sysctl_sched_usf_up_l0 == 0) && > + (usf_vdev.sysctl_sched_usf_down == 0)) > + 
static_branch_disable(&adjust_task_pred_set); > + else > + static_branch_enable(&adjust_task_pred_set); > + > + return ret; > +} > + > +static ssize_t store_sched_usf_non_ux_r(struct kobject *kobj, > + struct kobj_attribute *attr, > + const char *buf, size_t count) > +{ > + int val = 0; > + int ret = 0; > + > + ret = kstrtoint(buf, 0, &val); > + if (ret) > + return ret; > + > + if ((val >= BOOST_MIN_V) && (val <= 0)) { > + usf_vdev.sysctl_sched_usf_non_ux = val; > + usf_vdev.usf_non_ux = DIV_ROUND_UP(-val, -BOOST_MIN_V / 2); > + ret = count; > + } else { > + pr_err(USF_TAG "%d should fall into [%d %d]", > + val, BOOST_MIN_V, 0); > + ret = -EINVAL; > + } > + if (usf_vdev.sysctl_sched_usf_non_ux == 0) > + static_branch_disable(&adjust_task_pred_set); > + else > + static_branch_enable(&adjust_task_pred_set); > + > + return ret; > +} > + > +#define usf_attr_rw(_name) \ > +static struct kobj_attribute _name = \ > +__ATTR(_name, 0664, show_##_name, store_##_name) __ATTR_RW()? > + > +#define usf_show_node(_name, _value) \ > +static ssize_t show_##_name \ > +(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ > +{ \ > + return sprintf(buf, "%d", usf_vdev.sysctl_##_value); \ > +} Again do NOT use raw kobjects. > + > +usf_show_node(sched_usf_up_l0_r, sched_usf_up_l0); > +usf_show_node(sched_usf_down_r, sched_usf_down); > +usf_show_node(sched_usf_non_ux_r, sched_usf_non_ux); > + > +usf_attr_rw(sched_usf_up_l0_r); > +usf_attr_rw(sched_usf_down_r); > +usf_attr_rw(sched_usf_non_ux_r); > + > +static struct attribute *sched_attrs[] = { > + &sched_usf_up_l0_r.attr, > + &sched_usf_down_r.attr, > + &sched_usf_non_ux_r.attr, > + NULL, > +}; > + > +static struct attribute_group sched_attr_group = { > + .attrs = sched_attrs, > +}; ATTRIBUTE_GROUPS()? 
> + > +static int __init intera_monitor_init(void) > +{ > + int res = -1; > + struct device *dev; > + > + res = fb_register_client(&usf_lcd_nb); > + if (res < 0) { > + pr_err("Failed to register usf_lcd_nb!\n"); > + return res; > + } > + > + /* > + * create a sched_usf in cpu_subsys: > + * /sys/devices/system/cpu/sched_usf/... > + */ > + dev = cpu_subsys.dev_root; > + res = sysfs_create_group(&dev->kobj, &sched_attr_group); Do not just tack on random sysfs files to a random struct device that you do not own. That's ripe for big problems. Ugh, that seems to be how others do it too, not nice. Ok, but at the very least, use DEVICE_ATTR_RW() and do not use kobjects, as you will get into problems there. How does userspace know that these new sysfs files have shown up? You never told it about them, so does it just "guess"? thanks, greg k-h
On Tue, Aug 04, 2020 at 03:50:35PM +0800, Dongdong Yang wrote: > +What: /sys/devices/system/cpu/sched_usf > + /sys/devices/system/cpu/sched_usf/sched_usf_non_ux_r > + /sys/devices/system/cpu/sched_usf/sched_usf_up_l0_r > + /sys/devices/system/cpu/sched_usf/sched_usf_down_r > +Date: Aug 2020 > +Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> > +Description: User Sensitive Feedback factor auxiliary scheduling which > + is providing more utils adjustment settings to the high level > + by scenario identification. > + sched_usf_non_ux_r: > + The ratio of utils is cut down on screen off. The > + default value is 0, which no util is adjusted on sugov > + calculating utils to select cpufreq. Its range is > + [-100 , 0]. If its value falls into [-50, 0), the half > + of utils, which calculates cpufreq, shall be cut down. > + If its value falls into [-100, -50), only a quarter of > + utils are left to continue to calculate cpufreq. > + It is expected to be set [-100, 0) once enter into the > + identificated scenario, such as listen to music on > + screen off, and recover to 0 on out of the scenario, > + such as screen on. > + > + sched_usf_up_l0_r: > + The ratio of utils is boost up on screen on. The > + default value is 0, which no util is adjusted on sugov > + calculates utils to select cpufreq. Its range is [0 , 100]. > + If its value falls into (0, 50], a quarter of extra utils, > + which calculate cpufreq, shall be added. If its value > + falls into (50, 100], the half of extra utils are added > + to continue to calculate cpufreq. > + It is expected to be set (0, 100] once enter into the > + identificated scenario, such as browsing videolet on > + screen on, and recover to 0 on out of the scenario, > + such as screen off or videolet into background. > + > + sched_usf_down_r: > + The ratio of utils is cut down on screen on. The > + default value is 0, which no util is adjusted on sugov > + calculating utils to select cpufreq. 
Its range is > + [-100 , 0]. If its value falls into [-50, 0), the half > + of utils, which calculate cpufreq, shall be cut down. > + If its value falls into [-100, -50), only a quarter of > + utils are left to continue to calculate cpufreq. > + It is expected to be set [-100, 0) once enter into the > + identificated scenario, such as browsing videolet on > + screen on, and recover to 0 on out of the scenario, > + such as screen off or vidolet into background. I hate the names, they're a bunch of random letters. Maybe if you strip the 'sched_usf' prefix, on account on these files being in a directory by the same name, you'll have additional budget for readable names? Also, I detest 100, 100 sucks. Use something sane, like 128. Also, I'm not at all sure I understand the text. Also, nothing explains how any of this is supposed to be working on an SMP system. Please advice how this makes sense if you have 64 CPUs. My desktop has a screen, so giving feedback should work, no? > diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig > index e917501..a21c6ad 100644 > --- a/drivers/cpufreq/Kconfig > +++ b/drivers/cpufreq/Kconfig > @@ -224,6 +224,17 @@ config CPUFREQ_DT_PLATDEV > > If in doubt, say N. > > +config SCHED_USF > + bool "User Sensitive Factors for Scheduler" 'bool' means this cannot be a module, yet see below. > + depends on CPU_FREQ_GOV_SCHEDUTIL && FB I really hate how this is a special purpose hack for framebuffer. We don't do special purpose hacks like that. So you have something that only 'works' for small machines, small number of tasks, and requires FB. NAK, we don't do that upstream. > + help > + Select this option to enable the adjustment on the cpufreq with > + the user sensitive factors on schedule. It is special for mobile > + devices which more power care and quick response requirement on > + screen on. > + > + If unsure, say N. 
> + > if X86 > source "drivers/cpufreq/Kconfig.x86" > endif > diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h > index ed168b0..d5e20b7 100644 > --- a/include/trace/events/sched.h > +++ b/include/trace/events/sched.h > @@ -488,6 +488,41 @@ TRACE_EVENT(sched_process_hang, > #endif /* CONFIG_DETECT_HUNG_TASK */ > > /* > + * Tracepoint for tracking tuils be adjusted by USF: I can't find 'tuils' in my local dictionary, did you mean tools? And what does the comment try to day? Are USF users supposed to use this tracepoint to close a feedback loop or what? > + */ > +#ifdef CONFIG_SCHED_USF > +TRACE_EVENT(sched_usf_adjust_utils, > + > + TP_PROTO(int cpu_id, int up, int down, int nonux, unsigned long utils), You're using tracepoints wrong, pass in that usf_dev pointer and have the assign do all the work. > + > + TP_ARGS(cpu_id, up, down, nonux, utils), > + > + TP_STRUCT__entry( > + __field(int, cpu_id) > + __field(int, up) > + __field(int, down) > + __field(int, nonux) > + __field(unsigned long, utils) > + ), > + > + TP_fast_assign( > + __entry->cpu_id = cpu_id; > + __entry->up = up; > + __entry->down = down; > + __entry->nonux = nonux; > + __entry->utils = utils; > + ), > + > + TP_printk("cpu_id=%d up=%d down=%d nonux=%d utils=%lu", > + __entry->cpu_id, > + __entry->up, > + __entry->down, > + __entry->nonux, > + __entry->utils) > +); > +#endif /* CONFIG_SCHED_USF */ > + > +/* > * Tracks migration of tasks from one runqueue to another. Can be used to > * detect if automatic NUMA balancing is bouncing between nodes. 
> */ > diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile > index 5fc9c9b..58a0e7b 100644 > --- a/kernel/sched/Makefile > +++ b/kernel/sched/Makefile > @@ -36,3 +36,4 @@ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o > obj-$(CONFIG_MEMBARRIER) += membarrier.o > obj-$(CONFIG_CPU_ISOLATION) += isolation.o > obj-$(CONFIG_PSI) += psi.o > +obj-$(CONFIG_SCHED_USF) += usf.o > diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c > index 7fbaee2..79a0040 100644 > --- a/kernel/sched/cpufreq_schedutil.c > +++ b/kernel/sched/cpufreq_schedutil.c > @@ -289,12 +289,15 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, > return min(max, util); > } > > +DEFINE_STATIC_KEY_FALSE(adjust_task_pred_set); Missing whitespace, bad name. Also wrong file, all the actual control is in that usf nonsense. > static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu) > { > struct rq *rq = cpu_rq(sg_cpu->cpu); > unsigned long util = cpu_util_cfs(rq); > unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu); > > + if (static_branch_unlikely(&adjust_task_pred_set)) > + adjust_task_pred_demand(sg_cpu->cpu, &util, rq); Missing whitespace, horrible naming, there is nothing task here, Please explain how it makes sense to have that static branch when !SCHED_USF. > sg_cpu->max = max; > sg_cpu->bw_dl = cpu_bw_dl(rq); > > diff --git a/kernel/sched/usf.c b/kernel/sched/usf.c > new file mode 100644 > index 0000000..d4d7998 > --- /dev/null > +++ b/kernel/sched/usf.c > @@ -0,0 +1,294 @@ > +// SPDX-License-Identifier: GPL-2.0 > +/* > + * Copyright (C) 2020 XiaoMi Inc. > + * Author: Yang Dongdong <yangdongdong@xiaomi.com> > + * This program is free software; you can redistribute it and/or modify > + * it under the terms of the GNU General Public License version 2 as > + * published by the Free Software Foundation. 
> + * > + * This program is distributed in the hope that it will be useful, > + * but WITHOUT ANY WARRANTY; without even the implied warranty of > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. > + * See http://www.gnu.org/licenses/gpl-2.0.html for more details. > + */ > + > +#include <linux/module.h> > +#include <linux/init.h> > +#include <linux/platform_device.h> > +#include <linux/kthread.h> > +#include <linux/cpu.h> > +#include <linux/sysfs.h> > +#include <linux/kthread.h> > +#include <linux/kobject.h> > +#include <linux/module.h> > +#include <linux/kernel.h> > +#include <linux/init.h> > +#include <linux/kallsyms.h> > +#include <linux/fb.h> > +#include <linux/notifier.h> > +#include <trace/events/sched.h> > +#include "sched.h" > + > +#define BOOST_MIN_V -100 > +#define BOOST_MAX_V 100 > +#define LEVEL_TOP 3 > + > +#define USF_TAG "[usf_sched]" > + > +DEFINE_PER_CPU(unsigned long[PID_MAX_DEFAULT], task_hist_nivcsw); Hahahaha, I think not. > + > +static struct { > + bool is_sched_usf_enabled; > + bool is_screen_on; > + int sysctl_sched_usf_up_l0; > + int sysctl_sched_usf_down; > + int sysctl_sched_usf_non_ux; They're not sysctl's. Also letter soup. > + int usf_up_l0; > + int usf_down; > + int usf_non_ux; > +} usf_vdev; > + > +void adjust_task_pred_demand(int cpuid, > + unsigned long *util, > + struct rq *rq) Still a horrible name. > +{ > + /* sysctl_sched_latency/sysctl_sched_min_granularity */ What does this comment want to tell us? > + u32 bl_sw_num = 3; > + > + if (!usf_vdev.is_sched_usf_enabled || !rq || !rq->curr || > + (rq->curr->pid >= PID_MAX_DEFAULT)) Everything after is_sched_usf_enable is nonsense. And even that is dodgy since you had that static branch. You shouldn't be getting here if you're not enabled. 
> + return; > + > + if (usf_vdev.is_screen_on) { > + if (rq->curr->nivcsw > > + (per_cpu(task_hist_nivcsw, cpuid)[rq->curr->pid] > + + bl_sw_num + 1)) { That's horrible style, if you'd made a helper function with a descriptive name, you'd have readable code. > + (*util) += (*util) >> usf_vdev.usf_up_l0; So when 'current' has more non-voluntary context switches than it last had on this cpu, we add something to the util. That's dodgy as heck. Esp. without comments. This util is not for current. The best you can say is that current contributes to it. > + } else if (rq->curr->nivcsw < > + (per_cpu(task_hist_nivcsw, cpuid)[rq->curr->pid] > + + bl_sw_num - 1) && (rq->nr_running < bl_sw_num)) { Another unreadable expression with style issues, and how we're reducing util. > + (*util) >>= usf_vdev.usf_down; > + } > + per_cpu(task_hist_nivcsw, cpuid)[rq->curr->pid] = > + rq->curr->nivcsw; That's hardly a histogram is it. Or did you mean history? > + } else if (rq->curr->mm) { > + (*util) >>= usf_vdev.usf_non_ux; > + } > + > + trace_sched_usf_adjust_utils(cpuid, usf_vdev.usf_up_l0, > + usf_vdev.usf_down, > + usf_vdev.usf_non_ux, *util); > +} > + > +static int usf_lcd_notifier(struct notifier_block *nb, > + unsigned long val, void *data) > +{ > + struct fb_event *evdata = data; > + unsigned int blank; > + > + if (!evdata) > + return 0; > + > + if (val != FB_EVENT_BLANK) > + return 0; > + > + if (evdata->data && val == FB_EVENT_BLANK) { > + blank = *(int *)(evdata->data); > + > + switch (blank) { > + case FB_BLANK_POWERDOWN: > + usf_vdev.is_screen_on = false; > + if (usf_vdev.sysctl_sched_usf_non_ux != 0) > + static_branch_enable(&adjust_task_pred_set); > + else > + static_branch_disable(&adjust_task_pred_set); > + > + break; > + > + case FB_BLANK_UNBLANK: > + usf_vdev.is_screen_on = true; > + if (usf_vdev.sysctl_sched_usf_up_l0 != 0 || > + usf_vdev.sysctl_sched_usf_down != 0) > + static_branch_enable(&adjust_task_pred_set); > + else > + 
static_branch_disable(&adjust_task_pred_set); > + break; > + default: > + break; > + } > + > + usf_vdev.is_sched_usf_enabled = true; > + pr_info("%s : usf_vdev.is_screen_on:%b\n", > + __func__, usf_vdev.is_screen_on); > + } > + return NOTIFY_OK; > +} *groan*... this is horrific. Flipping the static branch is _very_ expensive. > + > +static int __init intera_monitor_init(void) > +{ > + int res = -1; > + struct device *dev; > + > + res = fb_register_client(&usf_lcd_nb); > + if (res < 0) { > + pr_err("Failed to register usf_lcd_nb!\n"); > + return res; > + } > + > + /* > + * create a sched_usf in cpu_subsys: > + * /sys/devices/system/cpu/sched_usf/... > + */ > + dev = cpu_subsys.dev_root; > + res = sysfs_create_group(&dev->kobj, &sched_attr_group); > + if (res) { > + fb_unregister_client(&usf_lcd_nb); > + return res; > + } > + static_branch_disable(&adjust_task_pred_set); You used DEFINE_STATIC_BRANCH_FALSE, it is already disabled. > + > + return res; > +} > + > +module_init(intera_monitor_init); > + > +static void __exit intera_monitor_exit(void) > +{ > + struct device *dev; > + > + dev = cpu_subsys.dev_root; > + sysfs_remove_group(&dev->kobj, &sched_attr_group); > + fb_unregister_client(&usf_lcd_nb); > + static_branch_disable(&adjust_task_pred_set); > +} > + > +module_exit(intera_monitor_exit); > + > +MODULE_LICENSE("GPL"); > +MODULE_DESCRIPTION("XiaoMi USF SCHED"); > +MODULE_AUTHOR("Yang Dongdong <yangdongdong@xiaomi.com>"); -ENOTAMODULE
Hi Dongdong On 08/04/20 15:50, Dongdong Yang wrote: > +What: /sys/devices/system/cpu/sched_usf > + /sys/devices/system/cpu/sched_usf/sched_usf_non_ux_r > + /sys/devices/system/cpu/sched_usf/sched_usf_up_l0_r > + /sys/devices/system/cpu/sched_usf/sched_usf_down_r > +Date: Aug 2020 > +Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> > +Description: User Sensitive Feedback factor auxiliary scheduling which > + is providing more utils adjustment settings to the high level > + by scenario identification. > + sched_usf_non_ux_r: > + The ratio of utils is cut down on screen off. The > + default value is 0, which no util is adjusted on sugov > + calculating utils to select cpufreq. Its range is > + [-100 , 0]. If its value falls into [-50, 0), the half > + of utils, which calculates cpufreq, shall be cut down. > + If its value falls into [-100, -50), only a quarter of > + utils are left to continue to calculate cpufreq. > + It is expected to be set [-100, 0) once enter into the > + identificated scenario, such as listen to music on > + screen off, and recover to 0 on out of the scenario, > + such as screen on. > + > + sched_usf_up_l0_r: > + The ratio of utils is boost up on screen on. The > + default value is 0, which no util is adjusted on sugov > + calculates utils to select cpufreq. Its range is [0 , 100]. > + If its value falls into (0, 50], a quarter of extra utils, > + which calculate cpufreq, shall be added. If its value > + falls into (50, 100], the half of extra utils are added > + to continue to calculate cpufreq. > + It is expected to be set (0, 100] once enter into the > + identificated scenario, such as browsing videolet on > + screen on, and recover to 0 on out of the scenario, > + such as screen off or videolet into background. > + > + sched_usf_down_r: > + The ratio of utils is cut down on screen on. The > + default value is 0, which no util is adjusted on sugov > + calculating utils to select cpufreq. Its range is > + [-100 , 0]. 
If its value falls into [-50, 0), the half > + of utils, which calculate cpufreq, shall be cut down. > + If its value falls into [-100, -50), only a quarter of > + utils are left to continue to calculate cpufreq. > + It is expected to be set [-100, 0) once enter into the > + identificated scenario, such as browsing videolet on > + screen on, and recover to 0 on out of the scenario, > + such as screen off or vidolet into background. AFACS you're duplicating util clamp functionality here. You can already use util clamp to boost tasks on screen on, and cap them on screen off. And extra brownie points; you can already use that on android 4.19 and 5.4 kernels (I'm assuming the battery device is android based, sorry). Any reason why util clamp isn't giving you what you want? To cap the system on screen off you need to # Don't allow the util to go above 512 echo 512 > /proc/sys/kernel/sched_util_clamp_min echo 512 > /proc/sys/kernel/sched_util_clamp_max To boost the system on screen on, you need first to lift the capping done above # Allow util to use the full range again echo 1024 > /proc/sys/kernel/sched_util_clamp_min echo 1024 > /proc/sys/kernel/sched_util_clamp_max # This is pseudo C code for_each_important_task(p) { /* * boost the task utilization to start from 512. */ sched_attr attr = { .util_min = 512, .util_max = 1024 }; sched_setattr(p, attr); } /* undo boosting once system has settled down */ for_each_important_task(p) { /* * reset util_min back to 0, or whatever value you want. */ sched_attr attr = { .util_min = 0, .util_max = 1024 }; sched_setattr(p, attr); } There's a cgroup API for util clamp too. Thanks -- Qais Yousef
On 8/4/20 12:50 AM, Dongdong Yang wrote: > From: Dongdong Yang <yangdongdong@xiaomi.com> > > --- > > diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu > index b555df8..e299418 100644 > --- a/Documentation/ABI/testing/sysfs-devices-system-cpu > +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu > @@ -614,3 +614,51 @@ Description: SPURR ticks for cpuX when it was idle. > > This sysfs interface exposes the number of SPURR ticks > for cpuX when it was idle. > + > +What: /sys/devices/system/cpu/sched_usf > + /sys/devices/system/cpu/sched_usf/sched_usf_non_ux_r > + /sys/devices/system/cpu/sched_usf/sched_usf_up_l0_r > + /sys/devices/system/cpu/sched_usf/sched_usf_down_r > +Date: Aug 2020 > +Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> > +Description: User Sensitive Feedback factor auxiliary scheduling which > + is providing more utils adjustment settings to the high level > + by scenario identification. what is "utils"? > + sched_usf_non_ux_r: > + The ratio of utils is cut down on screen off. The same question. > + default value is 0, which no util is adjusted on sugov what is "sugov"? > + calculating utils to select cpufreq. Its range is > + [-100 , 0]. If its value falls into [-50, 0), the half > + of utils, which calculates cpufreq, shall be cut down. > + If its value falls into [-100, -50), only a quarter of > + utils are left to continue to calculate cpufreq. > + It is expected to be set [-100, 0) once enter into the > + identificated scenario, such as listen to music on ^^^^^^^^^^^^^ not a word. > + screen off, and recover to 0 on out of the scenario, > + such as screen on. > + > + sched_usf_up_l0_r: > + The ratio of utils is boost up on screen on. The > + default value is 0, which no util is adjusted on sugov > + calculates utils to select cpufreq. Its range is [0 , 100]. 
> + If its value falls into (0, 50], a quarter of extra utils, > + which calculate cpufreq, shall be added. If its value > + falls into (50, 100], the half of extra utils are added > + to continue to calculate cpufreq. > + It is expected to be set (0, 100] once enter into the > + identificated scenario, such as browsing videolet on what is "videolet"? > + screen on, and recover to 0 on out of the scenario, > + such as screen off or videolet into background. > + > + sched_usf_down_r: > + The ratio of utils is cut down on screen on. The > + default value is 0, which no util is adjusted on sugov > + calculating utils to select cpufreq. Its range is > + [-100 , 0]. If its value falls into [-50, 0), the half > + of utils, which calculate cpufreq, shall be cut down. > + If its value falls into [-100, -50), only a quarter of > + utils are left to continue to calculate cpufreq. > + It is expected to be set [-100, 0) once enter into the > + identificated scenario, such as browsing videolet on > + screen on, and recover to 0 on out of the scenario, > + such as screen off or vidolet into background.
On 08/05/20 03:33, Dongdong Yang wrote: > Appreciate Qais for your above comments. I believe the clamp is very good for > terminal devices per pid or cgroup setting. I really hope it works for the > extended scenario, "screen off", although it has a potential side effect on > "screen on" response because it needs to be recovered at high level with > latency. I set "512" to sched_util_clamp_min and max on screen off for our > developing device with android kernel5.4. However, it still could not > replace sched_usf_non_ux_r from the test result as attachment. The cpufreq > could not go down in time. > Screenshot at 2020-08-05 09:56:38.png Please fix your email client so that it doesn't send in HTML. LKML will reject HTML emails. I can't interpret the numbers in the pictures. Can you help explain what I am looking at? I did see an issue with frequency not capped immediately when the system was busy. I am still trying to debug that. I already fixed one problem related to iowait boost not honouring uclamp requests, I will be posting a patch for this soon. If you have IO heavy workload, then iowait boost will cause schedutil to run at high frequency, and uclamp capping is not applied in that path. Can you trace what happens inside uclamp_rq_util_with() when it's called from sched_cpu_util()? The clamp should be applied quickly, so it's a bug we need to fix. In my case I noticed if I ctrl+Z then `fg`, the cap is applied. My hands are too full to look at this soon. So if you can trace it, that'd be great. Can you expand more on your worry for "screen on"? The only latency I see is userspace not being able to set uclamp values quickly. But since it seems you already can set sched_usf_non_ux_r from userspace with acceptable results, then uclamp should be able to cover the same functionality. What am I missing? Thanks -- Qais Yousef
On 08/05/20 19:13, Dongdong Yang wrote: > Appreciate Qais for your clamp implementation. I would like to add traces > for uclamp_rq_util_with and feedback you if I run into any issues. Thanks. FYI, top posting in LKML is frowned upon. Please put your answer underneath the quoted text. > > The util would not be adjusted as soon as FB screen on notification be > received by USF from kernel level if it is set by sched_usf_non_ux, no > matter whether screen on or off. However, sched_util_clamp_min/max have not > been recovered until user space screen on detection. The screen on response > would not be in time for the sensitive user when many background tasks are > running. Whether the kernel module could also > set sched_util_clamp_min/max? For boosting, are you just changing the sysctl or are you actively using sched_setattr() to boost tasks too? Please have a look at the documentation for the sysctl interface. https://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git/tree/Documentation/admin-guide/sysctl/kernel.rst?h=sched/core#n1065 In summary, they just control the _allowed_ levels. So you can use it to cap/throttle the maximum performance level the system is running at. But you can't use it to boost the whole system. You must use the sched_setattr() to boost important tasks individually or if all the tasks are in a cgroup you can use that. For cgroup interface there's a caveat. If you want to use it let me know so I can explain how boosting would work there. I advise to use the sched_setattr() interface to target and boost those important tasks only. You can as well be smart and target all the background tasks to cap them via sched_setattr(). In this case you wouldn't have to modify the sysctl_sched_util_clamp_min/max. I don't see uclamp being a suitable interface for in-kernel users. PM_QOS is more suitable in my opinion for in-kernel users if you want to impact the overall system performance. I might have misunderstood what you were saying above. 
If so, can you please rephrase? Thanks -- Qais Yousef
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index b555df8..e299418 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -614,3 +614,51 @@ Description: SPURR ticks for cpuX when it was idle. This sysfs interface exposes the number of SPURR ticks for cpuX when it was idle. + +What: /sys/devices/system/cpu/sched_usf + /sys/devices/system/cpu/sched_usf/sched_usf_non_ux_r + /sys/devices/system/cpu/sched_usf/sched_usf_up_l0_r + /sys/devices/system/cpu/sched_usf/sched_usf_down_r +Date: Aug 2020 +Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> +Description: User Sensitive Feedback factor auxiliary scheduling which + is providing more utils adjustment settings to the high level + by scenario identification. + sched_usf_non_ux_r: + The ratio of utils is cut down on screen off. The + default value is 0, which no util is adjusted on sugov + calculating utils to select cpufreq. Its range is + [-100 , 0]. If its value falls into [-50, 0), the half + of utils, which calculates cpufreq, shall be cut down. + If its value falls into [-100, -50), only a quarter of + utils are left to continue to calculate cpufreq. + It is expected to be set [-100, 0) once enter into the + identificated scenario, such as listen to music on + screen off, and recover to 0 on out of the scenario, + such as screen on. + + sched_usf_up_l0_r: + The ratio of utils is boost up on screen on. The + default value is 0, which no util is adjusted on sugov + calculates utils to select cpufreq. Its range is [0 , 100]. + If its value falls into (0, 50], a quarter of extra utils, + which calculate cpufreq, shall be added. If its value + falls into (50, 100], the half of extra utils are added + to continue to calculate cpufreq. 
+ It is expected to be set (0, 100] once entering the + identified scenario, such as browsing videolet on + screen on, and recover to 0 when leaving the scenario, + such as screen off or videolet into background. + + sched_usf_down_r: + The ratio of utils is cut down on screen on. The + default value is 0, meaning no util is adjusted when sugov + calculates utils to select cpufreq. Its range is + [-100 , 0]. If its value falls into [-50, 0), the half + of utils, which calculate cpufreq, shall be cut down. + If its value falls into [-100, -50), only a quarter of + utils are left to continue to calculate cpufreq. + It is expected to be set [-100, 0) once entering the + identified scenario, such as browsing videolet on + screen on, and recover to 0 when leaving the scenario, + such as screen off or videolet into background. diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index e917501..a21c6ad 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -224,6 +224,17 @@ config CPUFREQ_DT_PLATDEV If in doubt, say N. +config SCHED_USF + bool "User Sensitive Factors for Scheduler" + depends on CPU_FREQ_GOV_SCHEDUTIL && FB + help + Select this option to enable the adjustment on the cpufreq with + the user sensitive factors on schedule. It is special for mobile + devices which care more about power and require quick response + on screen on. + + If unsure, say N. 
+ if X86 source "drivers/cpufreq/Kconfig.x86" endif diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index ed168b0..d5e20b7 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -488,6 +488,41 @@ TRACE_EVENT(sched_process_hang, #endif /* CONFIG_DETECT_HUNG_TASK */ /* + * Tracepoint for tracking utils adjusted by USF: + */ +#ifdef CONFIG_SCHED_USF +TRACE_EVENT(sched_usf_adjust_utils, + + TP_PROTO(int cpu_id, int up, int down, int nonux, unsigned long utils), + + TP_ARGS(cpu_id, up, down, nonux, utils), + + TP_STRUCT__entry( + __field(int, cpu_id) + __field(int, up) + __field(int, down) + __field(int, nonux) + __field(unsigned long, utils) + ), + + TP_fast_assign( + __entry->cpu_id = cpu_id; + __entry->up = up; + __entry->down = down; + __entry->nonux = nonux; + __entry->utils = utils; + ), + + TP_printk("cpu_id=%d up=%d down=%d nonux=%d utils=%lu", + __entry->cpu_id, + __entry->up, + __entry->down, + __entry->nonux, + __entry->utils) +); +#endif /* CONFIG_SCHED_USF */ + +/* + * Tracks migration of tasks from one runqueue to another. Can be used to + * detect if automatic NUMA balancing is bouncing between nodes. 
*/ diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 5fc9c9b..58a0e7b 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -36,3 +36,4 @@ obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o obj-$(CONFIG_MEMBARRIER) += membarrier.o obj-$(CONFIG_CPU_ISOLATION) += isolation.o obj-$(CONFIG_PSI) += psi.o +obj-$(CONFIG_SCHED_USF) += usf.o diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 7fbaee2..79a0040 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -289,12 +289,15 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs, return min(max, util); } +DEFINE_STATIC_KEY_FALSE(adjust_task_pred_set); static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu) { struct rq *rq = cpu_rq(sg_cpu->cpu); unsigned long util = cpu_util_cfs(rq); unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu); + if (static_branch_unlikely(&adjust_task_pred_set)) + adjust_task_pred_demand(sg_cpu->cpu, &util, rq); sg_cpu->max = max; sg_cpu->bw_dl = cpu_bw_dl(rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 877fb08..496130b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2540,5 +2540,15 @@ static inline bool is_per_cpu_kthread(struct task_struct *p) } #endif +DECLARE_STATIC_KEY_FALSE(adjust_task_pred_set); +#ifdef CONFIG_SCHED_USF +void adjust_task_pred_demand(int cpuid, unsigned long *util, + struct rq *rq); +#else +static inline void adjust_task_pred_demand(int cpuid, + unsigned long *util, struct rq *rq) +{ } +#endif + void swake_up_all_locked(struct swait_queue_head *q); void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); diff --git a/kernel/sched/usf.c b/kernel/sched/usf.c new file mode 100644 index 0000000..d4d7998 --- /dev/null +++ b/kernel/sched/usf.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 XiaoMi Inc. 
+ * Author: Yang Dongdong <yangdongdong@xiaomi.com> + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See http://www.gnu.org/licenses/gpl-2.0.html for more details. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/kthread.h> +#include <linux/cpu.h> +#include <linux/sysfs.h> +#include <linux/kthread.h> +#include <linux/kobject.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/kallsyms.h> +#include <linux/fb.h> +#include <linux/notifier.h> +#include <trace/events/sched.h> +#include "sched.h" + +#define BOOST_MIN_V -100 +#define BOOST_MAX_V 100 +#define LEVEL_TOP 3 + +#define USF_TAG "[usf_sched]" + +DEFINE_PER_CPU(unsigned long[PID_MAX_DEFAULT], task_hist_nivcsw); + +static struct { + bool is_sched_usf_enabled; + bool is_screen_on; + int sysctl_sched_usf_up_l0; + int sysctl_sched_usf_down; + int sysctl_sched_usf_non_ux; + int usf_up_l0; + int usf_down; + int usf_non_ux; +} usf_vdev; + +void adjust_task_pred_demand(int cpuid, + unsigned long *util, + struct rq *rq) +{ + /* sysctl_sched_latency/sysctl_sched_min_granularity */ + u32 bl_sw_num = 3; + + if (!usf_vdev.is_sched_usf_enabled || !rq || !rq->curr || + (rq->curr->pid >= PID_MAX_DEFAULT)) + return; + + if (usf_vdev.is_screen_on) { + if (rq->curr->nivcsw > + (per_cpu(task_hist_nivcsw, cpuid)[rq->curr->pid] + + bl_sw_num + 1)) { + (*util) += (*util) >> usf_vdev.usf_up_l0; + } else if (rq->curr->nivcsw < + (per_cpu(task_hist_nivcsw, cpuid)[rq->curr->pid] + + bl_sw_num - 1) && (rq->nr_running < bl_sw_num)) { + (*util) >>= usf_vdev.usf_down; + } + per_cpu(task_hist_nivcsw, 
cpuid)[rq->curr->pid] = + rq->curr->nivcsw; + } else if (rq->curr->mm) { + (*util) >>= usf_vdev.usf_non_ux; + } + + trace_sched_usf_adjust_utils(cpuid, usf_vdev.usf_up_l0, + usf_vdev.usf_down, + usf_vdev.usf_non_ux, *util); +} + +static int usf_lcd_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct fb_event *evdata = data; + unsigned int blank; + + if (!evdata) + return 0; + + if (val != FB_EVENT_BLANK) + return 0; + + if (evdata->data && val == FB_EVENT_BLANK) { + blank = *(int *)(evdata->data); + + switch (blank) { + case FB_BLANK_POWERDOWN: + usf_vdev.is_screen_on = false; + if (usf_vdev.sysctl_sched_usf_non_ux != 0) + static_branch_enable(&adjust_task_pred_set); + else + static_branch_disable(&adjust_task_pred_set); + + break; + + case FB_BLANK_UNBLANK: + usf_vdev.is_screen_on = true; + if (usf_vdev.sysctl_sched_usf_up_l0 != 0 || + usf_vdev.sysctl_sched_usf_down != 0) + static_branch_enable(&adjust_task_pred_set); + else + static_branch_disable(&adjust_task_pred_set); + break; + default: + break; + } + + usf_vdev.is_sched_usf_enabled = true; + pr_info("%s : usf_vdev.is_screen_on:%b\n", + __func__, usf_vdev.is_screen_on); + } + return NOTIFY_OK; +} + +static struct notifier_block usf_lcd_nb = { + .notifier_call = usf_lcd_notifier, + .priority = INT_MAX, +}; + +static ssize_t store_sched_usf_up_l0_r(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int val = 0; + int ret = 0; + + ret = kstrtoint(buf, 0, &val); + if (ret) + return ret; + + if (val == 0) { + usf_vdev.sysctl_sched_usf_up_l0 = val; + usf_vdev.usf_up_l0 = 0; + } else if ((val > 0) && (val <= BOOST_MAX_V)) { + usf_vdev.sysctl_sched_usf_up_l0 = val; + usf_vdev.usf_up_l0 = LEVEL_TOP - + DIV_ROUND_UP(val, BOOST_MAX_V / 2); + ret = count; + } else { + pr_err(USF_TAG "%d should fall into [%d %d]", + val, 0, BOOST_MAX_V); + ret = -EINVAL; + } + if ((usf_vdev.sysctl_sched_usf_up_l0 == 0) && + (usf_vdev.sysctl_sched_usf_down == 0)) + 
static_branch_disable(&adjust_task_pred_set); + else + static_branch_enable(&adjust_task_pred_set); + + return ret; +} + +static ssize_t store_sched_usf_down_r(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int val = 0; + int ret = 0; + + ret = kstrtoint(buf, 0, &val); + if (ret) + return ret; + + if ((val >= BOOST_MIN_V) && (val <= 0)) { + usf_vdev.sysctl_sched_usf_down = val; + usf_vdev.usf_down = DIV_ROUND_UP(-val, -BOOST_MIN_V / 2); + ret = count; + } else { + pr_err(USF_TAG "%d should fall into [%d %d]", + val, BOOST_MIN_V, 0); + ret = -EINVAL; + } + if ((usf_vdev.sysctl_sched_usf_up_l0 == 0) && + (usf_vdev.sysctl_sched_usf_down == 0)) + static_branch_disable(&adjust_task_pred_set); + else + static_branch_enable(&adjust_task_pred_set); + + return ret; +} + +static ssize_t store_sched_usf_non_ux_r(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int val = 0; + int ret = 0; + + ret = kstrtoint(buf, 0, &val); + if (ret) + return ret; + + if ((val >= BOOST_MIN_V) && (val <= 0)) { + usf_vdev.sysctl_sched_usf_non_ux = val; + usf_vdev.usf_non_ux = DIV_ROUND_UP(-val, -BOOST_MIN_V / 2); + ret = count; + } else { + pr_err(USF_TAG "%d should fall into [%d %d]", + val, BOOST_MIN_V, 0); + ret = -EINVAL; + } + if (usf_vdev.sysctl_sched_usf_non_ux == 0) + static_branch_disable(&adjust_task_pred_set); + else + static_branch_enable(&adjust_task_pred_set); + + return ret; +} + +#define usf_attr_rw(_name) \ +static struct kobj_attribute _name = \ +__ATTR(_name, 0664, show_##_name, store_##_name) + +#define usf_show_node(_name, _value) \ +static ssize_t show_##_name \ +(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ +{ \ + return sprintf(buf, "%d", usf_vdev.sysctl_##_value); \ +} + +usf_show_node(sched_usf_up_l0_r, sched_usf_up_l0); +usf_show_node(sched_usf_down_r, sched_usf_down); +usf_show_node(sched_usf_non_ux_r, sched_usf_non_ux); + +usf_attr_rw(sched_usf_up_l0_r); 
+usf_attr_rw(sched_usf_down_r); +usf_attr_rw(sched_usf_non_ux_r); + +static struct attribute *sched_attrs[] = { + &sched_usf_up_l0_r.attr, + &sched_usf_down_r.attr, + &sched_usf_non_ux_r.attr, + NULL, +}; + +static struct attribute_group sched_attr_group = { + .attrs = sched_attrs, +}; + +static int __init intera_monitor_init(void) +{ + int res = -1; + struct device *dev; + + res = fb_register_client(&usf_lcd_nb); + if (res < 0) { + pr_err("Failed to register usf_lcd_nb!\n"); + return res; + } + + /* + * create a sched_usf in cpu_subsys: + * /sys/devices/system/cpu/sched_usf/... + */ + dev = cpu_subsys.dev_root; + res = sysfs_create_group(&dev->kobj, &sched_attr_group); + if (res) { + fb_unregister_client(&usf_lcd_nb); + return res; + } + static_branch_disable(&adjust_task_pred_set); + + return res; +} + +module_init(intera_monitor_init); + +static void __exit intera_monitor_exit(void) +{ + struct device *dev; + + dev = cpu_subsys.dev_root; + sysfs_remove_group(&dev->kobj, &sched_attr_group); + fb_unregister_client(&usf_lcd_nb); + static_branch_disable(&adjust_task_pred_set); +} + +module_exit(intera_monitor_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("XiaoMi USF SCHED"); +MODULE_AUTHOR("Yang Dongdong <yangdongdong@xiaomi.com>");