@@ -194,24 +194,65 @@ u32 resctrl_arch_system_num_rmid_idx(void)
return num_reqpartid << closid_shift;
}
+static u32 rmid2reqpartid(u32 rmid)
+{
+ u8 pmg_shift = fls(mpam_pmg_max);
+
+ WARN_ON_ONCE(pmg_shift > 8);
+
+ return rmid >> pmg_shift;
+}
+
+static u32 rmid2pmg(u32 rmid)
+{
+ u8 pmg_shift = fls(mpam_pmg_max);
+ u32 pmg_mask = ~(~0 << pmg_shift);
+
+ return rmid & pmg_mask;
+}
+
+static u32 req2intpartid(u32 reqpartid)
+{
+ u8 intpartid_shift = fls(mpam_intpartid_max);
+ u32 intpartid_mask = ~(~0 << intpartid_shift);
+
+ return reqpartid & intpartid_mask;
+}
+
+/*
+ * To avoid the reuse of rmid across multiple control groups, we will
+ * check the incoming closid to prevent rmid from being reallocated by
+ * resctrl_find_free_rmid().
+ *
+ * If the closid and rmid do not match upon inspection, immediately
+ * returns an invalid rmid. A valid rmid will not exceed 24 bits.
+ */
u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid)
{
- u8 closid_shift = fls(mpam_pmg_max);
+ u32 reqpartid = rmid2reqpartid(rmid);
+ u32 intpartid = req2intpartid(reqpartid);
+
+ if (cdp_enabled)
+ intpartid >>= 1;
- WARN_ON_ONCE(closid_shift > 8);
+ if (closid != intpartid)
+ return U32_MAX;
- return (closid << closid_shift) | rmid;
+ return rmid;
}
void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid)
{
- u8 closid_shift = fls(mpam_pmg_max);
- u32 pmg_mask = ~(~0 << closid_shift);
+ u32 reqpartid = rmid2reqpartid(idx);
+ u32 intpartid = req2intpartid(reqpartid);
- WARN_ON_ONCE(closid_shift > 8);
-
- *closid = idx >> closid_shift;
- *rmid = idx & pmg_mask;
+ if (rmid)
+ *rmid = idx;
+ if (closid) {
+ if (cdp_enabled)
+ intpartid >>= 1;
+ *closid = intpartid;
+ }
}
void resctrl_arch_sched_in(struct task_struct *tsk)
@@ -223,21 +264,20 @@ void resctrl_arch_sched_in(struct task_struct *tsk)
void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 rmid)
{
- WARN_ON_ONCE(closid > U16_MAX);
- WARN_ON_ONCE(rmid > U8_MAX);
+ u32 reqpartid = rmid2reqpartid(rmid);
+ u32 pmg = rmid2pmg(rmid);
- if (!cdp_enabled) {
- mpam_set_cpu_defaults(cpu, closid, closid, rmid, rmid);
- } else {
+ WARN_ON_ONCE(reqpartid > U16_MAX);
+ WARN_ON_ONCE(pmg > U8_MAX);
+
+ if (!cdp_enabled)
+ mpam_set_cpu_defaults(cpu, reqpartid, reqpartid, pmg, pmg);
+ else
/*
* When CDP is enabled, resctrl halves the closid range and we
* use odd/even partid for one closid.
*/
- u32 partid_d = resctrl_get_config_index(closid, CDP_DATA);
- u32 partid_i = resctrl_get_config_index(closid, CDP_CODE);
-
- mpam_set_cpu_defaults(cpu, partid_d, partid_i, rmid, rmid);
- }
+ mpam_set_cpu_defaults(cpu, reqpartid, reqpartid + 1, pmg, pmg);
}
void resctrl_arch_sync_cpu_closid_rmid(void *info)
@@ -256,41 +296,41 @@ void resctrl_arch_sync_cpu_closid_rmid(void *info)
void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid)
{
- WARN_ON_ONCE(closid > U16_MAX);
- WARN_ON_ONCE(rmid > U8_MAX);
+ u32 reqpartid = rmid2reqpartid(rmid);
+ u32 pmg = rmid2pmg(rmid);
- if (!cdp_enabled) {
- mpam_set_task_partid_pmg(tsk, closid, closid, rmid, rmid);
- } else {
- u32 partid_d = resctrl_get_config_index(closid, CDP_DATA);
- u32 partid_i = resctrl_get_config_index(closid, CDP_CODE);
+ WARN_ON_ONCE(reqpartid > U16_MAX);
+ WARN_ON_ONCE(pmg > U8_MAX);
+
+ if (!cdp_enabled)
+ mpam_set_task_partid_pmg(tsk, reqpartid, reqpartid, pmg, pmg);
+ else
+ mpam_set_task_partid_pmg(tsk, reqpartid, reqpartid + 1, pmg, pmg);
- mpam_set_task_partid_pmg(tsk, partid_d, partid_i, rmid, rmid);
- }
}
bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid)
{
u64 regval = mpam_get_regval(tsk);
- u32 tsk_closid = FIELD_GET(MPAM1_EL1_PARTID_D, regval);
+ u32 tsk_partid = FIELD_GET(MPAM1_EL1_PARTID_D, regval);
+
+ tsk_partid = req2intpartid(tsk_partid);
if (cdp_enabled)
- tsk_closid >>= 1;
+ tsk_partid >>= 1;
- return tsk_closid == closid;
+ return tsk_partid == closid;
}
/* The task's pmg is not unique, the partid must be considered too */
bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid)
{
u64 regval = mpam_get_regval(tsk);
- u32 tsk_closid = FIELD_GET(MPAM1_EL1_PARTID_D, regval);
- u32 tsk_rmid = FIELD_GET(MPAM1_EL1_PMG_D, regval);
-
- if (cdp_enabled)
- tsk_closid >>= 1;
+ u32 tsk_partid = FIELD_GET(MPAM1_EL1_PARTID_D, regval);
+ u32 tsk_pmg = FIELD_GET(MPAM1_EL1_PMG_D, regval);
- return (tsk_closid == closid) && (tsk_rmid == rmid);
+ return (tsk_partid == rmid2reqpartid(rmid)) &&
+ (tsk_pmg == rmid2pmg(rmid));
}
struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l)
@@ -412,7 +452,7 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid);
cfg.match_pmg = true;
- cfg.pmg = rmid;
+ cfg.pmg = rmid2pmg(rmid);
cfg.opts = resctrl_evt_config_to_mpam(dom->mbm_local_evt_cfg);
if (irqs_disabled()) {
@@ -420,7 +460,7 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
err = -EIO;
} else {
if (cdp_enabled) {
- cfg.partid = closid << 1;
+ cfg.partid = rmid2reqpartid(rmid);
err = mpam_msmon_read(dom->comp, &cfg, type, val);
if (err)
return err;
@@ -430,7 +470,7 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_mon_domain *d,
if (!err)
*val += cdp_val;
} else {
- cfg.partid = closid;
+ cfg.partid = rmid2reqpartid(rmid);
err = mpam_msmon_read(dom->comp, &cfg, type, val);
}
}
@@ -449,18 +489,18 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_mon_domain *d,
cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid);
cfg.match_pmg = true;
- cfg.pmg = rmid;
+ cfg.pmg = rmid2pmg(rmid);
dom = container_of(d, struct mpam_resctrl_dom, resctrl_mon_dom);
if (cdp_enabled) {
- cfg.partid = closid << 1;
+ cfg.partid = rmid2reqpartid(rmid);
mpam_msmon_reset_mbwu(dom->comp, &cfg);
cfg.partid += 1;
mpam_msmon_reset_mbwu(dom->comp, &cfg);
} else {
- cfg.partid = closid;
+ cfg.partid = rmid2reqpartid(rmid);
mpam_msmon_reset_mbwu(dom->comp, &cfg);
}
}
The Narrow Partid feature support for the MPAM driver statically or dynamically allocates all reqPARTIDs to various intPARTIDs. The new RMID allocation strategy will check whether there are available reqPARTIDs for the incoming closid, rather than simply checking for available PMGs. For a system with a mixture of MSCs, for MSCs that do not support narrow-partid, we use the PARTIDs exceeding the number of closids as reqPARTIDs for expanding the monitoring groups. Therefore, we expand the information contained in the RMID, so that it includes not only the PMG but also the reqPARTID information. The new RMID layout is: RMID = (reqPARTID << shift) | PMG. In order to keep the existing resctrl layer interfaces, the reqPARTIDs are allocated statically. Here req2intpartid() linearly binds each reqPARTID to the corresponding intPARTID. If reqPARTID needs to support dynamic allocation in the future, simply rewrite req2intpartid(), e.g. replace it with a table-lookup approach. The static mapping relationships between each group's closid/rmid and the respective MSCs' intPARTID/reqPARTID/PARTID are illustrated: n - Indicates the total number of intPARTIDs m - Indicates the number of reqPARTIDs per intPARTID P - Partition group M - Monitoring group Group closid rmid.reqPARTID MSCs with narrow-partid MSCs without narrow-partid P1 0 intPARTID_1 PARTID_1 M1_1 0 0 ├── reqPARTID_1_1 ├── PARTID_1 M1_2 0 0+n ├── reqPARTID_1_2 ├── PARTID_1_2 M1_3 0 0+n*2 ├── reqPARTID_1_3 ├── PARTID_1_3 ... ├── ... ├── ... M1_m 0 0+n*(m-1) └── reqPARTID_1_m └── PARTID_1_m P2 1 intPARTID_2 PARTID_2 M2_1 1 1 ├── reqPARTID_2_1 ├── PARTID_2 M2_2 1 1+n ├── reqPARTID_2_2 ├── PARTID_2_2 M2_3 1 1+n*2 ├── reqPARTID_2_3 ├── PARTID_2_3 ... ├── ... ├── ... M2_m 1 1+n*(m-1) └── reqPARTID_2_m └── PARTID_2_m Pn (n-1) intPARTID_n PARTID_n Mn_1 (n-1) (n-1) ├── reqPARTID_n_1 ├── PARTID_n Mn_2 (n-1) (n-1)+n ├── reqPARTID_n_2 ├── PARTID_n_2 Mn_3 (n-1) (n-1)+n*2 ├── reqPARTID_n_3 ├── PARTID_n_3 ... ├── ... ├── ... 
Mn_m (n-1) (n-1)+n*(m-1) = n*m-1 └── reqPARTID_n_m └── PARTID_n_m The resctrl layer uses the new conversion functions rmid2reqpartid() and rmid2pmg() to obtain the reqPARTID/PMG pair from an RMID. The translation between closid and intPARTID still follows the original conversion logic: intPARTID = resctrl_get_config_index(closid, resctrl_conf_type). Note that allocating the first n IDs to intPARTIDs keeps the existing conversion between closid and intPARTID. We still use resctrl_get_config_index() for the conversion, maintaining the original semantics during MPAM configuration updates. Essentially, the narrowing feature is an enhanced monitoring feature; we only expand the definition of rmid, while reqPARTID is only used in monitoring-related code paths. Now each control group has m (req)PARTIDs, which are used to expand the number of monitoring groups under one control group. Therefore, the number of monitoring groups is no longer limited by the range of MPAM's PMG, which enhances the extensibility of the system's monitoring capabilities. Signed-off-by: Zeng Heng <zengheng4@huawei.com> --- drivers/platform/arm64/mpam/mpam_resctrl.c | 126 ++++++++++++++------- 1 file changed, 83 insertions(+), 43 deletions(-)