@@ -239,6 +239,12 @@ GCR_ACCESSOR_RW(32, 0x130, l2_config)
GCR_ACCESSOR_RO(32, 0x150, sys_config2)
#define CM_GCR_SYS_CONFIG2_MAXVPW GENMASK(3, 0)
+/* GCR_L2_RAM_CONFIG - Configuration & status of L2 cache RAMs */
+GCR_ACCESSOR_RW(64, 0x240, l2_ram_config)
+#define CM_GCR_L2_RAM_CONFIG_PRESENT BIT(31)
+#define CM_GCR_L2_RAM_CONFIG_HCI_DONE BIT(30)
+#define CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED BIT(29)
+
/* GCR_L2_PFT_CONTROL - Controls hardware L2 prefetching */
GCR_ACCESSOR_RW(32, 0x300, l2_pft_control)
#define CM_GCR_L2_PFT_CONTROL_PAGEMASK GENMASK(31, 12)
@@ -250,6 +256,18 @@ GCR_ACCESSOR_RW(32, 0x308, l2_pft_control_b)
#define CM_GCR_L2_PFT_CONTROL_B_CEN BIT(8)
#define CM_GCR_L2_PFT_CONTROL_B_PORTID GENMASK(7, 0)
+/* GCR_L2_TAG_ADDR - Access addresses in L2 cache tags */
+GCR_ACCESSOR_RW(64, 0x600, l2_tag_addr)
+
+/* GCR_L2_TAG_STATE - Access L2 cache tag state */
+GCR_ACCESSOR_RW(64, 0x608, l2_tag_state)
+
+/* GCR_L2_DATA - Access data in L2 cache lines */
+GCR_ACCESSOR_RW(64, 0x610, l2_data)
+
+/* GCR_L2_ECC - Access ECC information from L2 cache lines */
+GCR_ACCESSOR_RW(64, 0x618, l2_ecc)
+
/* GCR_L2SM_COP - L2 cache op state machine control */
GCR_ACCESSOR_RW(32, 0x620, l2sm_cop)
#define CM_GCR_L2SM_COP_PRESENT BIT(31)
@@ -23,6 +23,7 @@ struct core_boot_config {
};
struct cluster_boot_config {
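+	/* Bitmap of cores in the cluster which have been powered up */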
+ unsigned long *core_power;
struct core_boot_config *core_config;
};
@@ -314,7 +314,9 @@ void mips_cm_lock_other(unsigned int cluster, unsigned int core,
FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);
if (cm_rev >= CM_REV_CM3_5) {
- val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+		if (cluster != cpu_cluster(&current_cpu_data))
+ val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+ val |= CM_GCR_Cx_OTHER_GIC_EN;
val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);
val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block);
} else {
@@ -25,10 +25,54 @@
#include <asm/time.h>
#include <asm/uasm.h>
-static DECLARE_BITMAP(core_power, NR_CPUS);
struct cluster_boot_config *mips_cps_cluster_bootcfg;
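+/*
+ * Power up the Coherence Manager of another cluster & wait for its CPC
+ * sequencer to reach the powered-up (U5) state.
+ */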
+static void power_up_other_cluster(unsigned int cluster)
+{
+ u32 stat, seq_state;
+ unsigned int timeout;
+
+ mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+ CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ stat = read_cpc_co_stat_conf();
+ mips_cm_unlock_other();
+
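+	/* Nothing to do if the cluster's CM is already powered up (state U5) */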
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+ seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+ return;
+
+ /* Set endianness & power up the CM */
+ mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ write_cpc_redir_sys_config(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN));
+ write_cpc_redir_pwrup_ctl(1);
+ mips_cm_unlock_other();
+
+ /* Wait for the CM to start up */
+ timeout = 1000;
+ mips_cm_lock_other(cluster, CM_GCR_Cx_OTHER_CORE_CM, 0,
+ CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ while (1) {
+ stat = read_cpc_co_stat_conf();
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+ seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U5)
+ break;
+
+ if (timeout) {
+ mdelay(1);
+ timeout--;
+ } else {
+ pr_warn("Waiting for cluster %u CM to power up... STAT_CONF=0x%x\n",
+ cluster, stat);
+ mdelay(1000);
+ }
+ }
+
+ mips_cm_unlock_other();
+}
+
static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
return min(smp_max_threads, mips_cps_numvps(cluster, core));
@@ -49,6 +93,9 @@ static void __init cps_smp_setup(void)
pr_cont(",");
pr_cont("{");
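+		/*
+		 * Power up the cluster's CM so that its GCRs can be accessed
+		 * in order to enumerate the cluster's cores & VPEs below.
+		 */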
+ if (mips_cm_revision() >= CM_REV_CM3_5)
+ power_up_other_cluster(cl);
+
ncores = mips_cps_numcores(cl);
for (c = 0; c < ncores; c++) {
core_vpes = core_vpe_count(cl, c);
@@ -76,8 +123,8 @@ static void __init cps_smp_setup(void)
/* Indicate present CPUs (CPU being synonymous with VPE) */
for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
- set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
- set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
+ set_cpu_possible(v, true);
+ set_cpu_present(v, true);
__cpu_number_map[v] = v;
__cpu_logical_map[v] = v;
}
@@ -85,19 +132,15 @@ static void __init cps_smp_setup(void)
/* Set a coherent default CCA (CWB) */
change_c0_config(CONF_CM_CMASK, 0x5);
- /* Core 0 is powered up (we're running on it) */
- bitmap_set(core_power, 0, 1);
-
/* Initialise core 0 */
mips_cps_core_init();
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
- if (mips_cm_revision() >= CM_REV_CM3) {
- core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+ core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+ if (mips_cm_revision() >= CM_REV_CM3)
write_gcr_bev_base(core_entry);
- }
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
@@ -177,6 +220,10 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
goto err_out;
mips_cps_cluster_bootcfg[cl].core_config = core_bootcfg;
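+		/* Track which of the cluster's cores have been powered up */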
+ mips_cps_cluster_bootcfg[cl].core_power =
+ kcalloc(BITS_TO_LONGS(ncores), sizeof(unsigned long),
+ GFP_KERNEL);
+
/* Allocate VPE boot configuration structs */
for (c = 0; c < ncores; c++) {
core_vpes = core_vpe_count(cl, c);
@@ -188,11 +235,12 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
}
}
- /* Mark this CPU as booted */
+ /* Mark this CPU as powered up & booted */
	cl = cpu_cluster(&current_cpu_data);
	c = cpu_core(&current_cpu_data);
cluster_bootcfg = &mips_cps_cluster_bootcfg[cl];
core_bootcfg = &cluster_bootcfg->core_config[c];
+	bitmap_set(cluster_bootcfg->core_power, cpu_core(&current_cpu_data), 1);
	atomic_set(&core_bootcfg->vpe_mask, 1 << cpu_vpe_id(&current_cpu_data));
return;
@@ -220,16 +268,123 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
}
}
-static void boot_core(unsigned int core, unsigned int vpe_id)
+static void init_cluster_l2(void)
{
- u32 stat, seq_state;
- unsigned timeout;
+ u32 l2_cfg, l2sm_cop, result;
+
+ while (1) {
+ l2_cfg = read_gcr_redir_l2_ram_config();
+
+ /* If HCI is not supported, use the state machine below */
+ if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_PRESENT))
+ break;
+ if (!(l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_SUPPORTED))
+ break;
+
+ /* If the HCI_DONE bit is set, we're finished */
+ if (l2_cfg & CM_GCR_L2_RAM_CONFIG_HCI_DONE)
+ return;
+ }
+
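+	/* HCI is unavailable; fall back to the L2 cache-op state machine */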
+ l2sm_cop = read_gcr_redir_l2sm_cop();
+ if (WARN(!(l2sm_cop & CM_GCR_L2SM_COP_PRESENT),
+ "L2 init not supported on this system yet"))
+ return;
+
+ /* Clear L2 tag registers */
+ write_gcr_redir_l2_tag_state(0);
+ write_gcr_redir_l2_ecc(0);
+
+ /* Ensure the L2 tag writes complete before the state machine starts */
+ mb();
+
+ /* Wait for the L2 state machine to be idle */
+ do {
+ l2sm_cop = read_gcr_redir_l2sm_cop();
+ } while (l2sm_cop & CM_GCR_L2SM_COP_RUNNING);
+
+ /* Start a store tag operation */
+ l2sm_cop = CM_GCR_L2SM_COP_TYPE_IDX_STORETAG;
+ l2sm_cop <<= __ffs(CM_GCR_L2SM_COP_TYPE);
+ l2sm_cop |= CM_GCR_L2SM_COP_CMD_START;
+ write_gcr_redir_l2sm_cop(l2sm_cop);
+
+ /* Ensure the state machine starts before we poll for completion */
+ mb();
+
+ /* Wait for the operation to be complete */
+ do {
+ l2sm_cop = read_gcr_redir_l2sm_cop();
+ result = l2sm_cop & CM_GCR_L2SM_COP_RESULT;
+ result >>= __ffs(CM_GCR_L2SM_COP_RESULT);
+ } while (!result);
+
+ WARN(result != CM_GCR_L2SM_COP_RESULT_DONE_OK,
+ "L2 state machine failed cache init with error %u\n", result);
+}
+
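+/*
+ * Boot a core in the given cluster. If no core in that cluster has been
+ * powered up yet, power up the cluster's CM & replicate the boot cluster's
+ * configuration into it first.
+ */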
+static void boot_core(unsigned int cluster, unsigned int core,
+ unsigned int vpe_id)
+{
+ struct cluster_boot_config *cluster_cfg;
+ u32 access, stat, seq_state;
+ unsigned int timeout, ncores;
+ unsigned long core_entry;
+
+ cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
+ ncores = mips_cps_numcores(cluster);
+ core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+
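+
+	/*
+	 * If this is the first core to be booted in a remote cluster, bring
+	 * the cluster's CM up & mirror the boot cluster's configuration.
+	 */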
+	if ((cluster != cpu_cluster(&current_cpu_data)) &&
+ bitmap_empty(cluster_cfg->core_power, ncores)) {
+ power_up_other_cluster(cluster);
+
+ mips_cm_lock_other(cluster, core, 0,
+ CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+ /* Ensure cluster GCRs are where we expect */
+ write_gcr_redir_base(read_gcr_base());
+ write_gcr_redir_cpc_base(read_gcr_cpc_base());
+ write_gcr_redir_gic_base(read_gcr_gic_base());
+
+ init_cluster_l2();
+
+ /* Mirror L2 configuration */
+ write_gcr_redir_l2_only_sync_base(read_gcr_l2_only_sync_base());
+ write_gcr_redir_l2_pft_control(read_gcr_l2_pft_control());
+ write_gcr_redir_l2_pft_control_b(read_gcr_l2_pft_control_b());
+
+ /* Mirror ECC/parity setup */
+ write_gcr_redir_err_control(read_gcr_err_control());
+
+ /* Set BEV base */
+ write_gcr_redir_bev_base(core_entry);
+
+ mips_cm_unlock_other();
+ }
+
+	if (cluster != cpu_cluster(&current_cpu_data)) {
+ mips_cm_lock_other(cluster, core, 0,
+ CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+
+ /* Ensure the core can access the GCRs */
+ access = read_gcr_redir_access();
+ access |= BIT(core);
+ write_gcr_redir_access(access);
+
+ mips_cm_unlock_other();
+ } else {
+ /* Ensure the core can access the GCRs */
+ access = read_gcr_access();
+ access |= BIT(core);
+ write_gcr_access(access);
+ }
/* Select the appropriate core */
- mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
/* Set its reset vector */
- write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
+ write_gcr_co_reset_base(core_entry);
/* Ensure its coherency is disabled */
write_gcr_co_coherence(0);
@@ -237,9 +392,6 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
/* Start it with the legacy memory map and exception base */
write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
- /* Ensure the core can access the GCRs */
- set_gcr_access(1 << core);
-
if (mips_cpc_present()) {
/* Reset the core */
mips_cpc_lock_other(core);
@@ -289,7 +441,17 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
mips_cm_unlock_other();
/* The core is now powered up */
- bitmap_set(core_power, core, 1);
+ bitmap_set(cluster_cfg->core_power, core, 1);
+
+ /*
+ * Restore CM_PWRUP=0 so that the CM can power down if all the cores in
+	 * the cluster do (e.g. if they're all removed via hotplug).
+ */
+ if (mips_cm_revision() >= CM_REV_CM3_5) {
+ mips_cm_lock_other(cluster, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
+ write_cpc_redir_pwrup_ctl(0);
+ mips_cm_unlock_other();
+ }
}
static void remote_vpe_boot(void *dummy)
@@ -316,10 +478,6 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
unsigned int remote;
int err;
- /* We don't yet support booting CPUs in other clusters */
- if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
- return -ENOSYS;
-
vpe_cfg->pc = (unsigned long)&smp_bootstrap;
vpe_cfg->sp = __KSTK_TOS(idle);
vpe_cfg->gp = (unsigned long)task_thread_info(idle);
@@ -328,14 +486,15 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
preempt_disable();
- if (!test_bit(core, core_power)) {
+ if (!test_bit(core, cluster_cfg->core_power)) {
/* Boot a VPE on a powered down core */
- boot_core(core, vpe_id);
+ boot_core(cluster, core, vpe_id);
goto out;
}
if (cpu_has_vp) {
- mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ mips_cm_lock_other(cluster, core, vpe_id,
+ CM_GCR_Cx_OTHER_BLOCK_LOCAL);
core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
write_gcr_co_reset_base(core_entry);
mips_cm_unlock_other();
@@ -543,11 +702,15 @@ static void cps_cpu_die(unsigned int cpu) { }
static void cps_cleanup_dead_cpu(unsigned cpu)
{
+ unsigned int cluster = cpu_cluster(&cpu_data[cpu]);
unsigned core = cpu_core(&cpu_data[cpu]);
unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
ktime_t fail_time;
unsigned stat;
int err;
+ struct cluster_boot_config *cluster_cfg;
+
+ cluster_cfg = &mips_cps_cluster_bootcfg[cluster];
/*
* Now wait for the CPU to actually offline. Without doing this that
@@ -599,7 +762,7 @@ static void cps_cleanup_dead_cpu(unsigned cpu)
} while (1);
/* Indicate the core is powered off */
- bitmap_clear(core_power, core, 1);
+ bitmap_clear(cluster_cfg->core_power, core, 1);
} else if (cpu_has_mipsmt) {
/*
* Have a CPU with access to the offlined CPUs registers wait