diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h
--- a/arch/x86/include/asm/resctrl.h
+++ b/arch/x86/include/asm/resctrl.h
@@ -100,6 +100,11 @@ static inline bool resctrl_arch_is_mbm_local_enabled(void)
return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID));
}
+static inline bool resctrl_arch_mbm_has_long_counter(void)
+{
+ return false;
+}
+
/*
* __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
*
diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
--- a/drivers/platform/mpam/mpam_devices.c
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -870,6 +870,8 @@ static void read_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val,
case mpam_feat_msmon_mbwu:
*ctl_val = mpam_read_monsel_reg(msc, CFG_MBWU_CTL);
*flt_val = mpam_read_monsel_reg(msc, CFG_MBWU_FLT);
+ if (mpam_ris_has_mbwu_long_counter(m->ris))
+ *ctl_val &= ~MSMON_CFG_x_CTL_OFLOW_STATUS;
break;
default:
return;
diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c
--- a/drivers/platform/mpam/mpam_resctrl.c
+++ b/drivers/platform/mpam/mpam_resctrl.c
@@ -81,6 +81,17 @@ bool resctrl_arch_is_mbm_total_enabled(void)
return mbm_total_class;
}
+bool resctrl_arch_mbm_has_long_counter(void)
+{
+ if (mbm_local_class)
+ return !!mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &mbm_local_class->props);
+
+ if (mbm_total_class)
+ return !!mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &mbm_total_class->props);
+
+ return false;
+}
+
bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level rid)
{
switch (rid) {
diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c
--- a/fs/resctrl/rdtgroup.c
+++ b/fs/resctrl/rdtgroup.c
@@ -2774,7 +2774,7 @@ static int rdt_get_tree(struct fs_context *fc)
if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable())
resctrl_mounted = true;
- if (resctrl_is_mbm_enabled()) {
+ if (resctrl_is_mbm_enabled() && !resctrl_arch_mbm_has_long_counter()) {
list_for_each_entry(dom, &l3->domains, list)
mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL,
RESCTRL_PICK_ANY_CPU);
@@ -4014,7 +4014,7 @@ static void _resctrl_offline_domain(struct rdt_resource *r,
if (resctrl_mounted && resctrl_arch_mon_capable())
rmdir_mondata_subdir_allrdtgrp(r, d->id);
- if (resctrl_is_mbm_enabled())
+ if (resctrl_is_mbm_enabled() && !resctrl_arch_mbm_has_long_counter())
cancel_delayed_work(&d->mbm_over);
if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) {
/*
@@ -4087,7 +4087,7 @@ static int _resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d)
if (err)
return err;
- if (resctrl_is_mbm_enabled()) {
+ if (resctrl_is_mbm_enabled() && !resctrl_arch_mbm_has_long_counter()) {
INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL,
RESCTRL_PICK_ANY_CPU);
@@ -4155,7 +4155,8 @@ void resctrl_offline_cpu(unsigned int cpu)
d = resctrl_get_domain_from_cpu(cpu, l3);
if (d) {
- if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) {
+ if (resctrl_is_mbm_enabled() && !resctrl_arch_mbm_has_long_counter() &&
+ cpu == d->mbm_work_cpu) {
cancel_delayed_work(&d->mbm_over);
mbm_setup_overflow_handler(d, 0, cpu);
}
diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h
--- a/include/linux/arm_mpam.h
+++ b/include/linux/arm_mpam.h
@@ -79,6 +79,7 @@ bool resctrl_arch_mon_capable(void);
bool resctrl_arch_is_llc_occupancy_enabled(void);
bool resctrl_arch_is_mbm_local_enabled(void);
bool resctrl_arch_is_mbm_total_enabled(void);
+bool resctrl_arch_mbm_has_long_counter(void);
/* reset cached configurations, then all devices */
void resctrl_arch_reset_resources(void);
It takes a substantial amount of time for long counters, which are wider
than 32 bits, to overflow. To let the filesystem code enable or disable
the periodic memory bandwidth overflow handler based on counter width,
introduce a common helper, resctrl_arch_mbm_has_long_counter(). For MPAM,
skip the periodic handler if the MSC component is capable of supporting
63-bit counters.

Signed-off-by: Shanker Donthineni <sdonthineni@nvidia.com>
---
Changes since v1:
- Skip the 32-bit MBWU overflow status when checking for a config mismatch

 arch/x86/include/asm/resctrl.h       |  5 +++++
 drivers/platform/mpam/mpam_devices.c |  2 ++
 drivers/platform/mpam/mpam_resctrl.c | 11 +++++++++++
 fs/resctrl/rdtgroup.c                |  9 +++++----
 include/linux/arm_mpam.h             |  1 +
 5 files changed, 24 insertions(+), 4 deletions(-)
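For context only (not part of the patch): a standalone back-of-the-envelope
sketch of why a 63-bit counter makes the periodic overflow worker unnecessary.
It assumes the MBWU counter tallies bytes and a sustained 100 GB/s of
monitored traffic; under those assumptions a 32-bit counter wraps in tens of
milliseconds, while a 63-bit counter takes roughly three years.

```c
/*
 * Illustration only: time for an N-bit byte counter to wrap at a given
 * sustained bandwidth. Not kernel code; build with any ordinary C compiler.
 */
#include <stdint.h>
#include <stdio.h>

static double seconds_to_overflow(unsigned int counter_bits, double bytes_per_sec)
{
	/* Largest count the counter can hold before wrapping (counter_bits < 64). */
	double max_count = (double)((UINT64_C(1) << counter_bits) - 1);

	return max_count / bytes_per_sec;
}

int main(void)
{
	double bw = 100e9;	/* assumed: 100 GB/s of monitored traffic */

	printf("32-bit counter wraps after %.3f seconds\n",
	       seconds_to_overflow(32, bw));
	printf("63-bit counter wraps after %.1f years\n",
	       seconds_to_overflow(63, bw) / (3600.0 * 24 * 365));
	return 0;
}
```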