Message ID | 20230616063210.19063-2-eric.lin@sifive.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Conor Dooley |
Headers | show |
Series | Add SiFive Private L2 cache and PMU driver | expand |
On 16/06/2023 07:32, Eric Lin wrote: > This adds SiFive private L2 cache driver which will show > cache config information when booting and add cpu hotplug > callback functions. > > Signed-off-by: Eric Lin <eric.lin@sifive.com> > Signed-off-by: Nick Hu <nick.hu@sifive.com> > Reviewed-by: Zong Li <zong.li@sifive.com> > --- > drivers/soc/sifive/Kconfig | 8 + > drivers/soc/sifive/Makefile | 1 + > drivers/soc/sifive/sifive_pl2.h | 25 ++++ > drivers/soc/sifive/sifive_pl2_cache.c | 202 ++++++++++++++++++++++++++ > include/linux/cpuhotplug.h | 1 + > 5 files changed, 237 insertions(+) > create mode 100644 drivers/soc/sifive/sifive_pl2.h > create mode 100644 drivers/soc/sifive/sifive_pl2_cache.c > > diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig > index e86870be34c9..573564295058 100644 > --- a/drivers/soc/sifive/Kconfig > +++ b/drivers/soc/sifive/Kconfig > @@ -7,4 +7,12 @@ config SIFIVE_CCACHE > help > Support for the composable cache controller on SiFive platforms. > > +config SIFIVE_PL2 > + bool "Sifive private L2 Cache controller" > + help > + Support for the private L2 cache controller on SiFive platforms. > + The SiFive Private L2 Cache Controller is per hart and communicates > + with both the upstream L1 caches and downstream L3 cache or memory, > + enabling a high-performance cache subsystem. > + > endif > diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile > index 1f5dc339bf82..707493e1c691 100644 > --- a/drivers/soc/sifive/Makefile > +++ b/drivers/soc/sifive/Makefile > @@ -1,3 +1,4 @@ > # SPDX-License-Identifier: GPL-2.0 > > obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o > +obj-$(CONFIG_SIFIVE_PL2) += sifive_pl2_cache.o > diff --git a/drivers/soc/sifive/sifive_pl2.h b/drivers/soc/sifive/sifive_pl2.h > new file mode 100644 > index 000000000000..57aa1019d5ed > --- /dev/null > +++ b/drivers/soc/sifive/sifive_pl2.h > @@ -0,0 +1,25 @@ > +/* SPDX-License-Identifier: GPL-2.0 */ > +/* > + * Copyright (C) 2023 SiFive, Inc. 
> + * > + */ > + > +#ifndef _SIFIVE_PL2_H > +#define _SIFIVE_PL2_H > + > +#define SIFIVE_PL2_CONFIG1_OFFSET 0x1000 > +#define SIFIVE_PL2_CONFIG0_OFFSET 0x1008 > +#define SIFIVE_PL2_PMCLIENT_OFFSET 0x2800 > + > +struct sifive_pl2_state { > + void __iomem *pl2_base; > + u32 config1; > + u32 config0; > + u64 pmclientfilter; > +}; > + > +int sifive_pl2_pmu_init(void); > +int sifive_pl2_pmu_probe(struct device_node *pl2_node, > + void __iomem *pl2_base, int cpu); > + > +#endif /*_SIFIVE_PL2_H */ > diff --git a/drivers/soc/sifive/sifive_pl2_cache.c b/drivers/soc/sifive/sifive_pl2_cache.c > new file mode 100644 > index 000000000000..aeb51d576af9 > --- /dev/null > +++ b/drivers/soc/sifive/sifive_pl2_cache.c > @@ -0,0 +1,202 @@ > +// SPDX-License-Identifier: GPL-2.0 > +/* > + * SiFive private L2 cache controller Driver > + * > + * Copyright (C) 2018-2023 SiFive, Inc. > + */ > + > +#define pr_fmt(fmt) "pL2CACHE: " fmt > + > +#include <linux/of.h> > +#include <linux/of_device.h> > +#include <linux/platform_device.h> > +#include <linux/io.h> > +#include <linux/cpu_pm.h> > +#include <linux/cpuhotplug.h> > +#include "sifive_pl2.h" > + > +static DEFINE_PER_CPU(struct sifive_pl2_state, sifive_pl2_state); > + > +static void sifive_pl2_state_save(struct sifive_pl2_state *pl2_state) > +{ > + void __iomem *pl2_base = pl2_state->pl2_base; > + > + if (!pl2_base) > + return; is this test really needed? 
> + > + pl2_state->config1 = readl(pl2_base + SIFIVE_PL2_CONFIG1_OFFSET); > + pl2_state->config0 = readl(pl2_base + SIFIVE_PL2_CONFIG0_OFFSET); > + pl2_state->pmclientfilter = readq(pl2_base + SIFIVE_PL2_PMCLIENT_OFFSET); > +} > + > +static void sifive_pl2_state_restore(struct sifive_pl2_state *pl2_state) > +{ > + void __iomem *pl2_base = pl2_state->pl2_base; > + > + if (!pl2_base) > + return; > + > + writel(pl2_state->config1, pl2_base + SIFIVE_PL2_CONFIG1_OFFSET); > + writel(pl2_state->config0, pl2_base + SIFIVE_PL2_CONFIG0_OFFSET); > + writeq(pl2_state->pmclientfilter, pl2_base + SIFIVE_PL2_PMCLIENT_OFFSET); > +} > + > +/* > + * CPU Hotplug call back function > + */ > +static int sifive_pl2_online_cpu(unsigned int cpu) > +{ > + struct sifive_pl2_state *pl2_state = this_cpu_ptr(&sifive_pl2_state); > + > + sifive_pl2_state_restore(pl2_state); > + > + return 0; > +} > + > +static int sifive_pl2_offline_cpu(unsigned int cpu) > +{ > + struct sifive_pl2_state *pl2_state = this_cpu_ptr(&sifive_pl2_state); > + > + /* Save the pl2 state */ > + sifive_pl2_state_save(pl2_state); > + > + return 0; > +} > + > +/* > + * PM notifer for suspend to ram > + */ > +#ifdef CONFIG_CPU_PM > +static int sifive_pl2_pm_notify(struct notifier_block *b, unsigned long cmd, > + void *v) > +{ > + struct sifive_pl2_state *pl2_state = this_cpu_ptr(&sifive_pl2_state); > + > + switch (cmd) { > + case CPU_PM_ENTER: > + /* Save the pl2 state */ > + sifive_pl2_state_save(pl2_state); > + break; > + case CPU_PM_ENTER_FAILED: > + case CPU_PM_EXIT: > + sifive_pl2_state_restore(pl2_state); > + break; > + default: > + break; > + } > + > + return NOTIFY_OK; > +} > + > +static struct notifier_block sifive_pl2_pm_notifier_block = { > + .notifier_call = sifive_pl2_pm_notify, > +}; > + > +static inline void sifive_pl2_pm_init(void) > +{ > + cpu_pm_register_notifier(&sifive_pl2_pm_notifier_block); > +} > + > +#else > +static inline void sifive_pl2_pm_init(void) { } > +#endif /* CONFIG_CPU_PM */ > + > +static 
const struct of_device_id sifive_pl2_cache_of_ids[] = { > + { .compatible = "sifive,pL2Cache0" }, > + { .compatible = "sifive,pL2Cache1" }, why the single cap here? I think that looks ugly. > + { /* sentinel value */ } > +}; > + > +static void pl2_config_read(void __iomem *pl2_base, int cpu) > +{ > + u32 regval, bank, way, set, cacheline; > + > + regval = readl(pl2_base); > + bank = regval & 0xff; > + pr_info("in the CPU: %d\n", cpu); > + pr_info("No. of Banks in the cache: %d\n", bank); > + way = (regval & 0xff00) >> 8; > + pr_info("No. of ways per bank: %d\n", way); > + set = (regval & 0xff0000) >> 16; > + pr_info("Total sets: %llu\n", (uint64_t)1 << set); > + cacheline = (regval & 0xff000000) >> 24; > + pr_info("Bytes per cache block: %llu\n", (uint64_t)1 << cacheline); > + pr_info("Size: %d\n", way << (set + cacheline)); please either remove this or make it a single line, this is just going to spam the log with any system with more than one cpu core. > +} > + > +static int sifive_pl2_cache_dev_probe(struct platform_device *pdev) > +{ > + struct resource *res; > + int cpu, ret = -EINVAL; > + struct device_node *cpu_node, *pl2_node; > + struct sifive_pl2_state *pl2_state = NULL; > + void __iomem *pl2_base; > + > + /* Traverse all cpu nodes to find the one mapping to its pl2 node. */ > + for_each_cpu(cpu, cpu_possible_mask) { > + cpu_node = of_cpu_device_node_get(cpu); > + pl2_node = of_parse_phandle(cpu_node, "next-level-cache", 0); > + > + /* Found it! */ > + if (dev_of_node(&pdev->dev) == pl2_node) { > + /* Use cpu to get its percpu data sifive_pl2_state. */ > + pl2_state = per_cpu_ptr(&sifive_pl2_state, cpu); > + break; > + } > + } > + > + if (!pl2_state) { > + pr_err("Not found the corresponding cpu_node in dts.\n"); > + goto early_err; > + } > + > + /* Set base address of select and counter registers. 
*/ > + pl2_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); > + if (IS_ERR(pl2_base)) { > + ret = PTR_ERR(pl2_base); > + goto early_err; > + } > + > + /* Print pL2 configs. */ > + pl2_config_read(pl2_base, cpu); > + pl2_state->pl2_base = pl2_base; > + > + return 0; > + > +early_err: > + return ret; > +} > + > +static struct platform_driver sifive_pl2_cache_driver = { > + .driver = { > + .name = "SiFive-pL2-cache", > + .of_match_table = sifive_pl2_cache_of_ids, > + }, > + .probe = sifive_pl2_cache_dev_probe, > +}; > + > +static int __init sifive_pl2_cache_init(void) > +{ > + int ret; > + > + ret = cpuhp_setup_state(CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE, > + "soc/sifive/pl2:online", > + sifive_pl2_online_cpu, > + sifive_pl2_offline_cpu); > + if (ret < 0) { > + pr_err("Failed to register CPU hotplug notifier %d\n", ret); > + return ret; > + } > + > + ret = platform_driver_register(&sifive_pl2_cache_driver); > + if (ret) { > + pr_err("Failed to register sifive_pl2_cache_driver: %d\n", ret); > + return ret; > + } > + > + sifive_pl2_pm_init(); > + > + return ret; > +} > + > +device_initcall(sifive_pl2_cache_init); > diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h > index 0f1001dca0e0..35cd5ba0030b 100644 > --- a/include/linux/cpuhotplug.h > +++ b/include/linux/cpuhotplug.h > @@ -207,6 +207,7 @@ enum cpuhp_state { > CPUHP_AP_IRQ_AFFINITY_ONLINE, > CPUHP_AP_BLK_MQ_ONLINE, > CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, > + CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE, > CPUHP_AP_X86_INTEL_EPB_ONLINE, > CPUHP_AP_PERF_ONLINE, > CPUHP_AP_PERF_X86_ONLINE,
Le 16/06/2023 à 08:32, Eric Lin a écrit : > This adds SiFive private L2 cache driver which will show > cache config information when booting and add cpu hotplug > callback functions. > > Signed-off-by: Eric Lin <eric.lin-SpMDHPYPyPbQT0dZR+AlfA@public.gmane.org> > Signed-off-by: Nick Hu <nick.hu-SpMDHPYPyPbQT0dZR+AlfA@public.gmane.org> > Reviewed-by: Zong Li <zong.li-SpMDHPYPyPbQT0dZR+AlfA@public.gmane.org> [...] > +static int __init sifive_pl2_cache_init(void) > +{ > + int ret; > + > + ret = cpuhp_setup_state(CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE, > + "soc/sifive/pl2:online", > + sifive_pl2_online_cpu, > + sifive_pl2_offline_cpu); > + if (ret < 0) { > + pr_err("Failed to register CPU hotplug notifier %d\n", ret); > + return ret; > + } > + > + ret = platform_driver_register(&sifive_pl2_cache_driver); > + if (ret) { > + pr_err("Failed to register sifive_pl2_cache_driver: %d\n", ret); Blind guess: does cpuhp_remove_state() need to be called? > + return ret; > + } > + > + sifive_pl2_pm_init(); > + > + return ret; If you send a v2, return 0; would be slightly nicer here. CJ > +} [...]
Hey Eric, On Fri, Jun 16, 2023 at 02:32:08PM +0800, Eric Lin wrote: > This adds SiFive private L2 cache driver which will show > cache config information when booting and add cpu hotplug > callback functions. > > Signed-off-by: Eric Lin <eric.lin@sifive.com> > Signed-off-by: Nick Hu <nick.hu@sifive.com> Missing a Co-developed-by for Nick? > +static void pl2_config_read(void __iomem *pl2_base, int cpu) > +{ > + u32 regval, bank, way, set, cacheline; > + > + regval = readl(pl2_base); > + bank = regval & 0xff; > + pr_info("in the CPU: %d\n", cpu); > + pr_info("No. of Banks in the cache: %d\n", bank); > + way = (regval & 0xff00) >> 8; > + pr_info("No. of ways per bank: %d\n", way); > + set = (regval & 0xff0000) >> 16; > + pr_info("Total sets: %llu\n", (uint64_t)1 << set); > + cacheline = (regval & 0xff000000) >> 24; > + pr_info("Bytes per cache block: %llu\n", (uint64_t)1 << cacheline); > + pr_info("Size: %d\n", way << (set + cacheline)); > +} Isn't this basically all information that we get anyway in sysfs based on what gets put into the DT, except printed out once per CPU at boottime? If there's reason to keep it, please do as suggested by Ben and cut down the number of lines emitted. Look at the ccache one for comparison: static void ccache_config_read(void) { u32 cfg; cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG); pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n", FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg), FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg), BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)), BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg))); cfg = readl(ccache_base + SIFIVE_CCACHE_WAYENABLE); pr_info("Index of the largest way enabled: %u\n", cfg); } It'd also be good to print the same things as the ccache, no? 
> +static int sifive_pl2_cache_dev_probe(struct platform_device *pdev) > +{ > + struct resource *res; > + int cpu, ret = -EINVAL; > + struct device_node *cpu_node, *pl2_node; > + struct sifive_pl2_state *pl2_state = NULL; > + void __iomem *pl2_base; Please pick a sensible ordering for variables. IDC if it is reverse xmas tree, or sorting by types, but this just seems quite random.. > + /* Traverse all cpu nodes to find the one mapping to its pl2 node. */ > + for_each_cpu(cpu, cpu_possible_mask) { > + cpu_node = of_cpu_device_node_get(cpu); > + pl2_node = of_parse_phandle(cpu_node, "next-level-cache", 0); > + > + /* Found it! */ > + if (dev_of_node(&pdev->dev) == pl2_node) { > + /* Use cpu to get its percpu data sifive_pl2_state. */ > + pl2_state = per_cpu_ptr(&sifive_pl2_state, cpu); > + break; > + } > + } > + > + if (!pl2_state) { > + pr_err("Not found the corresponding cpu_node in dts.\n"); I don't think this error message is going to be helpful in figuring out where the problem is on a machine with many of the caches. More information about *which* cache caused it would be good. Also it is not grammatically correct, it should read something like "Failed to find CPU node for cache@abc" or something along those lines. > + goto early_err; early_err just returns ret. Why not just return the error directly? > + } > + > + /* Set base address of select and counter registers. */ > + pl2_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); > + if (IS_ERR(pl2_base)) { > + ret = PTR_ERR(pl2_base); > + goto early_err; > + } > + > + /* Print pL2 configs. 
*/ > + pl2_config_read(pl2_base, cpu); > + pl2_state->pl2_base = pl2_base; > + > + return 0; > + > +early_err: > + return ret; > +} > +static struct platform_driver sifive_pl2_cache_driver = { > + .driver = { > + .name = "SiFive-pL2-cache", > + .of_match_table = sifive_pl2_cache_of_ids, > + }, > + .probe = sifive_pl2_cache_dev_probe, > +}; > + > +static int __init sifive_pl2_cache_init(void) > +{ > + int ret; > + > + ret = cpuhp_setup_state(CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE, > + "soc/sifive/pl2:online", > + sifive_pl2_online_cpu, > + sifive_pl2_offline_cpu); Got some weird use of whitespace here & above, please remove the spaces. Cheers, Conor.
Hi Ben, On Fri, Jun 16, 2023 at 4:30 PM Ben Dooks <ben.dooks@codethink.co.uk> wrote: > > On 16/06/2023 07:32, Eric Lin wrote: > > This adds SiFive private L2 cache driver which will show > > cache config information when booting and add cpu hotplug > > callback functions. > > > > Signed-off-by: Eric Lin <eric.lin@sifive.com> > > Signed-off-by: Nick Hu <nick.hu@sifive.com> > > Reviewed-by: Zong Li <zong.li@sifive.com> > > --- > > drivers/soc/sifive/Kconfig | 8 + > > drivers/soc/sifive/Makefile | 1 + > > drivers/soc/sifive/sifive_pl2.h | 25 ++++ > > drivers/soc/sifive/sifive_pl2_cache.c | 202 ++++++++++++++++++++++++++ > > include/linux/cpuhotplug.h | 1 + > > 5 files changed, 237 insertions(+) > > create mode 100644 drivers/soc/sifive/sifive_pl2.h > > create mode 100644 drivers/soc/sifive/sifive_pl2_cache.c > > > > diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig > > index e86870be34c9..573564295058 100644 > > --- a/drivers/soc/sifive/Kconfig > > +++ b/drivers/soc/sifive/Kconfig > > @@ -7,4 +7,12 @@ config SIFIVE_CCACHE > > help > > Support for the composable cache controller on SiFive platforms. > > > > +config SIFIVE_PL2 > > + bool "Sifive private L2 Cache controller" > > + help > > + Support for the private L2 cache controller on SiFive platforms. > > + The SiFive Private L2 Cache Controller is per hart and communicates > > + with both the upstream L1 caches and downstream L3 cache or memory, > > + enabling a high-performance cache subsystem. 
> > + > > endif > > diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile > > index 1f5dc339bf82..707493e1c691 100644 > > --- a/drivers/soc/sifive/Makefile > > +++ b/drivers/soc/sifive/Makefile > > @@ -1,3 +1,4 @@ > > # SPDX-License-Identifier: GPL-2.0 > > > > obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o > > +obj-$(CONFIG_SIFIVE_PL2) += sifive_pl2_cache.o > > diff --git a/drivers/soc/sifive/sifive_pl2.h b/drivers/soc/sifive/sifive_pl2.h > > new file mode 100644 > > index 000000000000..57aa1019d5ed > > --- /dev/null > > +++ b/drivers/soc/sifive/sifive_pl2.h > > @@ -0,0 +1,25 @@ > > +/* SPDX-License-Identifier: GPL-2.0 */ > > +/* > > + * Copyright (C) 2023 SiFive, Inc. > > + * > > + */ > > + > > +#ifndef _SIFIVE_PL2_H > > +#define _SIFIVE_PL2_H > > + > > +#define SIFIVE_PL2_CONFIG1_OFFSET 0x1000 > > +#define SIFIVE_PL2_CONFIG0_OFFSET 0x1008 > > +#define SIFIVE_PL2_PMCLIENT_OFFSET 0x2800 > > + > > +struct sifive_pl2_state { > > + void __iomem *pl2_base; > > + u32 config1; > > + u32 config0; > > + u64 pmclientfilter; > > +}; > > + > > +int sifive_pl2_pmu_init(void); > > +int sifive_pl2_pmu_probe(struct device_node *pl2_node, > > + void __iomem *pl2_base, int cpu); > > + > > +#endif /*_SIFIVE_PL2_H */ > > diff --git a/drivers/soc/sifive/sifive_pl2_cache.c b/drivers/soc/sifive/sifive_pl2_cache.c > > new file mode 100644 > > index 000000000000..aeb51d576af9 > > --- /dev/null > > +++ b/drivers/soc/sifive/sifive_pl2_cache.c > > @@ -0,0 +1,202 @@ > > +// SPDX-License-Identifier: GPL-2.0 > > +/* > > + * SiFive private L2 cache controller Driver > > + * > > + * Copyright (C) 2018-2023 SiFive, Inc. 
> > + */ > > + > > +#define pr_fmt(fmt) "pL2CACHE: " fmt > > + > > +#include <linux/of.h> > > +#include <linux/of_device.h> > > +#include <linux/platform_device.h> > > +#include <linux/io.h> > > +#include <linux/cpu_pm.h> > > +#include <linux/cpuhotplug.h> > > +#include "sifive_pl2.h" > > + > > +static DEFINE_PER_CPU(struct sifive_pl2_state, sifive_pl2_state); > > + > > +static void sifive_pl2_state_save(struct sifive_pl2_state *pl2_state) > > +{ > > + void __iomem *pl2_base = pl2_state->pl2_base; > > + > > + if (!pl2_base) > > + return; > > is this test realy needed? > Yes, The function cpuhp_setup_state() is called before sifive_pl2_cache_dev_probe(). When registering the CPU hotplug state, the kernel will issue the pl2 CPU hotplug callback. However, the pl2_base is not yet being ioremap in sifive_pl2_cache_dev_probe(). Therefore, it is necessary to check pl2_base first to avoid such a scenario. > > + > > + pl2_state->config1 = readl(pl2_base + SIFIVE_PL2_CONFIG1_OFFSET); > > + pl2_state->config0 = readl(pl2_base + SIFIVE_PL2_CONFIG0_OFFSET); > > + pl2_state->pmclientfilter = readq(pl2_base + SIFIVE_PL2_PMCLIENT_OFFSET); > > +} > > + > > +static void sifive_pl2_state_restore(struct sifive_pl2_state *pl2_state) > > +{ > > + void __iomem *pl2_base = pl2_state->pl2_base; > > + > > + if (!pl2_base) > > + return; > > + > > + writel(pl2_state->config1, pl2_base + SIFIVE_PL2_CONFIG1_OFFSET); > > + writel(pl2_state->config0, pl2_base + SIFIVE_PL2_CONFIG0_OFFSET); > > + writeq(pl2_state->pmclientfilter, pl2_base + SIFIVE_PL2_PMCLIENT_OFFSET); > > +} > > + > > +/* > > + * CPU Hotplug call back function > > + */ > > +static int sifive_pl2_online_cpu(unsigned int cpu) > > +{ > > + struct sifive_pl2_state *pl2_state = this_cpu_ptr(&sifive_pl2_state); > > + > > + sifive_pl2_state_restore(pl2_state); > > + > > + return 0; > > +} > > + > > +static int sifive_pl2_offline_cpu(unsigned int cpu) > > +{ > > + struct sifive_pl2_state *pl2_state = this_cpu_ptr(&sifive_pl2_state); > > + 
> > + /* Save the pl2 state */ > > + sifive_pl2_state_save(pl2_state); > > + > > + return 0; > > +} > > + > > +/* > > + * PM notifer for suspend to ram > > + */ > > +#ifdef CONFIG_CPU_PM > > +static int sifive_pl2_pm_notify(struct notifier_block *b, unsigned long cmd, > > + void *v) > > +{ > > + struct sifive_pl2_state *pl2_state = this_cpu_ptr(&sifive_pl2_state); > > + > > + switch (cmd) { > > + case CPU_PM_ENTER: > > + /* Save the pl2 state */ > > + sifive_pl2_state_save(pl2_state); > > + break; > > + case CPU_PM_ENTER_FAILED: > > + case CPU_PM_EXIT: > > + sifive_pl2_state_restore(pl2_state); > > + break; > > + default: > > + break; > > + } > > + > > + return NOTIFY_OK; > > +} > > + > > +static struct notifier_block sifive_pl2_pm_notifier_block = { > > + .notifier_call = sifive_pl2_pm_notify, > > +}; > > + > > +static inline void sifive_pl2_pm_init(void) > > +{ > > + cpu_pm_register_notifier(&sifive_pl2_pm_notifier_block); > > +} > > + > > +#else > > +static inline void sifive_pl2_pm_init(void) { } > > +#endif /* CONFIG_CPU_PM */ > > + > > +static const struct of_device_id sifive_pl2_cache_of_ids[] = { > > + { .compatible = "sifive,pL2Cache0" }, > > + { .compatible = "sifive,pL2Cache1" }, > > why the single cap here? I think that looks ugly. > OK, I'll fix it in v2. > > + { /* sentinel value */ } > > +}; > > + > > +static void pl2_config_read(void __iomem *pl2_base, int cpu) > > +{ > > + u32 regval, bank, way, set, cacheline; > > + > > + regval = readl(pl2_base); > > + bank = regval & 0xff; > > + pr_info("in the CPU: %d\n", cpu); > > + pr_info("No. of Banks in the cache: %d\n", bank); > > + way = (regval & 0xff00) >> 8; > > + pr_info("No. 
of ways per bank: %d\n", way); > > + set = (regval & 0xff0000) >> 16; > > + pr_info("Total sets: %llu\n", (uint64_t)1 << set); > > + cacheline = (regval & 0xff000000) >> 24; > > + pr_info("Bytes per cache block: %llu\n", (uint64_t)1 << cacheline); > > + pr_info("Size: %d\n", way << (set + cacheline)); > > > please either remove this or make it a single line, this is just going > to spam the log with any system with more than one cpu core. > OK, I will make this log more simple in v2. Thanks for the review. Best Regards, Eric Lin. > > +} > > + > > +static int sifive_pl2_cache_dev_probe(struct platform_device *pdev) > > +{ > > + struct resource *res; > > + int cpu, ret = -EINVAL; > > + struct device_node *cpu_node, *pl2_node; > > + struct sifive_pl2_state *pl2_state = NULL; > > + void __iomem *pl2_base; > > + > > + /* Traverse all cpu nodes to find the one mapping to its pl2 node. */ > > + for_each_cpu(cpu, cpu_possible_mask) { > > + cpu_node = of_cpu_device_node_get(cpu); > > + pl2_node = of_parse_phandle(cpu_node, "next-level-cache", 0); > > + > > + /* Found it! */ > > + if (dev_of_node(&pdev->dev) == pl2_node) { > > + /* Use cpu to get its percpu data sifive_pl2_state. */ > > + pl2_state = per_cpu_ptr(&sifive_pl2_state, cpu); > > + break; > > + } > > + } > > + > > + if (!pl2_state) { > > + pr_err("Not found the corresponding cpu_node in dts.\n"); > > + goto early_err; > > + } > > + > > + /* Set base address of select and counter registers. */ > > + pl2_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); > > + if (IS_ERR(pl2_base)) { > > + ret = PTR_ERR(pl2_base); > > + goto early_err; > > + } > > + > > + /* Print pL2 configs. 
*/ > > + pl2_config_read(pl2_base, cpu); > > + pl2_state->pl2_base = pl2_base; > > + > > + return 0; > > + > > +early_err: > > + return ret; > > +} > > + > > +static struct platform_driver sifive_pl2_cache_driver = { > > + .driver = { > > + .name = "SiFive-pL2-cache", > > + .of_match_table = sifive_pl2_cache_of_ids, > > + }, > > + .probe = sifive_pl2_cache_dev_probe, > > +}; > > + > > +static int __init sifive_pl2_cache_init(void) > > +{ > > + int ret; > > + > > + ret = cpuhp_setup_state(CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE, > > + "soc/sifive/pl2:online", > > + sifive_pl2_online_cpu, > > + sifive_pl2_offline_cpu); > > + if (ret < 0) { > > + pr_err("Failed to register CPU hotplug notifier %d\n", ret); > > + return ret; > > + } > > + > > + ret = platform_driver_register(&sifive_pl2_cache_driver); > > + if (ret) { > > + pr_err("Failed to register sifive_pl2_cache_driver: %d\n", ret); > > + return ret; > > + } > > + > > + sifive_pl2_pm_init(); > > + > > + return ret; > > +} > > + > > +device_initcall(sifive_pl2_cache_init); > > diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h > > index 0f1001dca0e0..35cd5ba0030b 100644 > > --- a/include/linux/cpuhotplug.h > > +++ b/include/linux/cpuhotplug.h > > @@ -207,6 +207,7 @@ enum cpuhp_state { > > CPUHP_AP_IRQ_AFFINITY_ONLINE, > > CPUHP_AP_BLK_MQ_ONLINE, > > CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, > > + CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE, > > CPUHP_AP_X86_INTEL_EPB_ONLINE, > > CPUHP_AP_PERF_ONLINE, > > CPUHP_AP_PERF_X86_ONLINE, > > -- > Ben Dooks http://www.codethink.co.uk/ > Senior Engineer Codethink - Providing Genius > > https://www.codethink.co.uk/privacy.html >
Hi Christophe, On Sat, Jun 17, 2023 at 3:02 AM Christophe JAILLET <christophe.jaillet@wanadoo.fr> wrote: > > Le 16/06/2023 à 08:32, Eric Lin a écrit : > > This adds SiFive private L2 cache driver which will show > > cache config information when booting and add cpu hotplug > > callback functions. > > > > Signed-off-by: Eric Lin <eric.lin-SpMDHPYPyPbQT0dZR+AlfA@public.gmane.org> > > Signed-off-by: Nick Hu <nick.hu-SpMDHPYPyPbQT0dZR+AlfA@public.gmane.org> > > Reviewed-by: Zong Li <zong.li-SpMDHPYPyPbQT0dZR+AlfA@public.gmane.org> > > [...] > > > +static int __init sifive_pl2_cache_init(void) > > +{ > > + int ret; > > + > > + ret = cpuhp_setup_state(CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE, > > + "soc/sifive/pl2:online", > > + sifive_pl2_online_cpu, > > + sifive_pl2_offline_cpu); > > + if (ret < 0) { > > + pr_err("Failed to register CPU hotplug notifier %d\n", ret); > > + return ret; > > + } > > + > > + ret = platform_driver_register(&sifive_pl2_cache_driver); > > + if (ret) { > > + pr_err("Failed to register sifive_pl2_cache_driver: %d\n", ret); > > Blind guess: does cpuhp_remove_state() needs to be called? > Yes, I'll fix this in v2. Thanks. > > + return ret; > > + } > > + > > + sifive_pl2_pm_init(); > > + > > + return ret; > > If you send a v2, return 0; would be slighly nicer here. > OK, I'll fix it in v2. Thanks for the review. Best regards, Eric Lin > CJ > > > +} > > [...] >
Hi Conor, On Sat, Jun 17, 2023 at 5:05 AM Conor Dooley <conor@kernel.org> wrote: > > Hey Eric, > > On Fri, Jun 16, 2023 at 02:32:08PM +0800, Eric Lin wrote: > > This adds SiFive private L2 cache driver which will show > > cache config information when booting and add cpu hotplug > > callback functions. > > > > Signed-off-by: Eric Lin <eric.lin@sifive.com> > > Signed-off-by: Nick Hu <nick.hu@sifive.com> > > Missing a Co-developed-by for Nick? Yes, I'll add Co-developed-by for Nick in v2. Thanks. > > > > +static void pl2_config_read(void __iomem *pl2_base, int cpu) > > +{ > > + u32 regval, bank, way, set, cacheline; > > + > > + regval = readl(pl2_base); > > + bank = regval & 0xff; > > + pr_info("in the CPU: %d\n", cpu); > > + pr_info("No. of Banks in the cache: %d\n", bank); > > + way = (regval & 0xff00) >> 8; > > + pr_info("No. of ways per bank: %d\n", way); > > + set = (regval & 0xff0000) >> 16; > > + pr_info("Total sets: %llu\n", (uint64_t)1 << set); > > + cacheline = (regval & 0xff000000) >> 24; > > + pr_info("Bytes per cache block: %llu\n", (uint64_t)1 << cacheline); > > + pr_info("Size: %d\n", way << (set + cacheline)); > > +} > > Isn't this basically all information that we get anyway in sysfs based > on what gets put into the DT, except printed out once per CPU at > boottime? > If there's reason to keep it, please do as suggested by Ben and cut down > the number of lines emitted. 
Look at the ccache one for comparison: > static void ccache_config_read(void) > { > u32 cfg; > > cfg = readl(ccache_base + SIFIVE_CCACHE_CONFIG); > pr_info("%llu banks, %llu ways, sets/bank=%llu, bytes/block=%llu\n", > FIELD_GET(SIFIVE_CCACHE_CONFIG_BANK_MASK, cfg), > FIELD_GET(SIFIVE_CCACHE_CONFIG_WAYS_MASK, cfg), > BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_SETS_MASK, cfg)), > BIT_ULL(FIELD_GET(SIFIVE_CCACHE_CONFIG_BLKS_MASK, cfg))); > > cfg = readl(ccache_base + SIFIVE_CCACHE_WAYENABLE); > pr_info("Index of the largest way enabled: %u\n", cfg); > } > It'd also be good to print the same things as the ccache, no? > Yes, I'll cut down the number of lines as the ccache in v2. Thanks for the suggestion. > > +static int sifive_pl2_cache_dev_probe(struct platform_device *pdev) > > +{ > > + struct resource *res; > > + int cpu, ret = -EINVAL; > > + struct device_node *cpu_node, *pl2_node; > > + struct sifive_pl2_state *pl2_state = NULL; > > + void __iomem *pl2_base; > > Please pick a sensible ordering for variables. IDC if it is reverse xmas > tree, or sorting by types, but this just seems quite random.. > Yes, I'll sort by type in v2. > > + /* Traverse all cpu nodes to find the one mapping to its pl2 node. */ > > + for_each_cpu(cpu, cpu_possible_mask) { > > + cpu_node = of_cpu_device_node_get(cpu); > > + pl2_node = of_parse_phandle(cpu_node, "next-level-cache", 0); > > + > > + /* Found it! */ > > + if (dev_of_node(&pdev->dev) == pl2_node) { > > + /* Use cpu to get its percpu data sifive_pl2_state. */ > > + pl2_state = per_cpu_ptr(&sifive_pl2_state, cpu); > > + break; > > + } > > + } > > + > > + if (!pl2_state) { > > + pr_err("Not found the corresponding cpu_node in dts.\n"); > > I don't think this error message is going to be helpful in figuring out > where the problem is on a machine with many of the caches. More > information about *which* cache caused it would be good. 
> Also it is not grammatically correct, it should read something like > "Failed to find CPU node for cache@abc" or something along those lines. > OK, I'll rewrite the error message to make it more helpful for the user. I'll fix it in v2. Thanks for the suggestion. > > + goto early_err; > > early_err just returns ret. Why not just return the error directly? > Yeah, it can just return ret. I'll fix it in v2. > > + } > > + > > + /* Set base address of select and counter registers. */ > > + pl2_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); > > + if (IS_ERR(pl2_base)) { > > + ret = PTR_ERR(pl2_base); > > + goto early_err; > > + } > > + > > + /* Print pL2 configs. */ > > + pl2_config_read(pl2_base, cpu); > > + pl2_state->pl2_base = pl2_base; > > + > > + return 0; > > + > > +early_err: > > + return ret; > > +} > > > +static struct platform_driver sifive_pl2_cache_driver = { > > + .driver = { > > + .name = "SiFive-pL2-cache", > > + .of_match_table = sifive_pl2_cache_of_ids, > > + }, > > + .probe = sifive_pl2_cache_dev_probe, > > +}; > > + > > +static int __init sifive_pl2_cache_init(void) > > +{ > > + int ret; > > + > > + ret = cpuhp_setup_state(CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE, > > + "soc/sifive/pl2:online", > > + sifive_pl2_online_cpu, > > + sifive_pl2_offline_cpu); > > Got some weird use of whitespace here & above, please remove the spaces. > Yes, I'll remove the whitespace in v2. Thanks for the review. Best Regards, Eric Lin. > Cheers, > Conor.
diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig index e86870be34c9..573564295058 100644 --- a/drivers/soc/sifive/Kconfig +++ b/drivers/soc/sifive/Kconfig @@ -7,4 +7,12 @@ config SIFIVE_CCACHE help Support for the composable cache controller on SiFive platforms. +config SIFIVE_PL2 + bool "Sifive private L2 Cache controller" + help + Support for the private L2 cache controller on SiFive platforms. + The SiFive Private L2 Cache Controller is per hart and communicates + with both the upstream L1 caches and downstream L3 cache or memory, + enabling a high-performance cache subsystem. + endif diff --git a/drivers/soc/sifive/Makefile b/drivers/soc/sifive/Makefile index 1f5dc339bf82..707493e1c691 100644 --- a/drivers/soc/sifive/Makefile +++ b/drivers/soc/sifive/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o +obj-$(CONFIG_SIFIVE_PL2) += sifive_pl2_cache.o diff --git a/drivers/soc/sifive/sifive_pl2.h b/drivers/soc/sifive/sifive_pl2.h new file mode 100644 index 000000000000..57aa1019d5ed --- /dev/null +++ b/drivers/soc/sifive/sifive_pl2.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 SiFive, Inc. 
+ *
+ */
+
+#ifndef _SIFIVE_PL2_H
+#define _SIFIVE_PL2_H
+
+#define SIFIVE_PL2_CONFIG1_OFFSET	0x1000
+#define SIFIVE_PL2_CONFIG0_OFFSET	0x1008
+#define SIFIVE_PL2_PMCLIENT_OFFSET	0x2800
+
+struct sifive_pl2_state {
+	void __iomem *pl2_base;
+	u32 config1;
+	u32 config0;
+	u64 pmclientfilter;
+};
+
+int sifive_pl2_pmu_init(void);
+int sifive_pl2_pmu_probe(struct device_node *pl2_node,
+			 void __iomem *pl2_base, int cpu);
+
+#endif /* _SIFIVE_PL2_H */
diff --git a/drivers/soc/sifive/sifive_pl2_cache.c b/drivers/soc/sifive/sifive_pl2_cache.c
new file mode 100644
index 000000000000..aeb51d576af9
--- /dev/null
+++ b/drivers/soc/sifive/sifive_pl2_cache.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SiFive private L2 cache controller Driver
+ *
+ * Copyright (C) 2018-2023 SiFive, Inc.
+ */
+
+#define pr_fmt(fmt) "pL2CACHE: " fmt
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpuhotplug.h>
+#include "sifive_pl2.h"
+
+/* Per-hart pL2 register state, saved/restored across hotplug and CPU PM. */
+static DEFINE_PER_CPU(struct sifive_pl2_state, sifive_pl2_state);
+
+/* Save the pL2 control registers into the per-cpu state. */
+static void sifive_pl2_state_save(struct sifive_pl2_state *pl2_state)
+{
+	void __iomem *pl2_base = pl2_state->pl2_base;
+
+	if (!pl2_base)
+		return;
+
+	pl2_state->config1 = readl(pl2_base + SIFIVE_PL2_CONFIG1_OFFSET);
+	pl2_state->config0 = readl(pl2_base + SIFIVE_PL2_CONFIG0_OFFSET);
+	pl2_state->pmclientfilter = readq(pl2_base + SIFIVE_PL2_PMCLIENT_OFFSET);
+}
+
+/* Restore the pL2 control registers from the per-cpu state. */
+static void sifive_pl2_state_restore(struct sifive_pl2_state *pl2_state)
+{
+	void __iomem *pl2_base = pl2_state->pl2_base;
+
+	if (!pl2_base)
+		return;
+
+	writel(pl2_state->config1, pl2_base + SIFIVE_PL2_CONFIG1_OFFSET);
+	writel(pl2_state->config0, pl2_base + SIFIVE_PL2_CONFIG0_OFFSET);
+	writeq(pl2_state->pmclientfilter, pl2_base + SIFIVE_PL2_PMCLIENT_OFFSET);
+}
+
+/*
+ * CPU hotplug callback functions
+ */
+static int sifive_pl2_online_cpu(unsigned int cpu)
+{
+	struct sifive_pl2_state *pl2_state = this_cpu_ptr(&sifive_pl2_state);
+
+	sifive_pl2_state_restore(pl2_state);
+
+	return 0;
+}
+
+static int sifive_pl2_offline_cpu(unsigned int cpu)
+{
+	struct sifive_pl2_state *pl2_state = this_cpu_ptr(&sifive_pl2_state);
+
+	/* Save the pl2 state */
+	sifive_pl2_state_save(pl2_state);
+
+	return 0;
+}
+
+/*
+ * PM notifier for suspend to RAM
+ */
+#ifdef CONFIG_CPU_PM
+static int sifive_pl2_pm_notify(struct notifier_block *b, unsigned long cmd,
+				void *v)
+{
+	struct sifive_pl2_state *pl2_state = this_cpu_ptr(&sifive_pl2_state);
+
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		/* Save the pl2 state */
+		sifive_pl2_state_save(pl2_state);
+		break;
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		sifive_pl2_state_restore(pl2_state);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sifive_pl2_pm_notifier_block = {
+	.notifier_call = sifive_pl2_pm_notify,
+};
+
+static inline void sifive_pl2_pm_init(void)
+{
+	cpu_pm_register_notifier(&sifive_pl2_pm_notifier_block);
+}
+
+#else
+static inline void sifive_pl2_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+static const struct of_device_id sifive_pl2_cache_of_ids[] = {
+	{ .compatible = "sifive,pL2Cache0" },
+	{ .compatible = "sifive,pL2Cache1" },
+	{ /* sentinel value */ }
+};
+
+/* Decode and log the cache geometry encoded in the pL2 config register. */
+static void pl2_config_read(void __iomem *pl2_base, int cpu)
+{
+	u32 regval, bank, way, set, cacheline;
+
+	regval = readl(pl2_base);
+	bank = regval & 0xff;
+	pr_info("in the CPU: %d\n", cpu);
+	pr_info("No. of Banks in the cache: %d\n", bank);
+	way = (regval & 0xff00) >> 8;
+	pr_info("No. of ways per bank: %d\n", way);
+	set = (regval & 0xff0000) >> 16;
+	pr_info("Total sets: %llu\n", 1ULL << set);
+	cacheline = (regval & 0xff000000) >> 24;
+	pr_info("Bytes per cache block: %llu\n", 1ULL << cacheline);
+	pr_info("Size: %d\n", way << (set + cacheline));
+}
+
+static int sifive_pl2_cache_dev_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct device_node *cpu_node, *pl2_node;
+	struct sifive_pl2_state *pl2_state = NULL;
+	void __iomem *pl2_base;
+	int cpu;
+
+	/* Traverse all cpu nodes to find the one mapping to its pl2 node. */
+	for_each_cpu(cpu, cpu_possible_mask) {
+		cpu_node = of_cpu_device_node_get(cpu);
+		pl2_node = of_parse_phandle(cpu_node, "next-level-cache", 0);
+		of_node_put(cpu_node);
+
+		/* Found it! */
+		if (dev_of_node(&pdev->dev) == pl2_node) {
+			of_node_put(pl2_node);
+			/* Use cpu to get its percpu data sifive_pl2_state. */
+			pl2_state = per_cpu_ptr(&sifive_pl2_state, cpu);
+			break;
+		}
+		of_node_put(pl2_node);
+	}
+
+	if (!pl2_state) {
+		pr_err("Failed to find the matching cpu node in the devicetree\n");
+		return -EINVAL;
+	}
+
+	/* Set base address of select and counter registers. */
+	pl2_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+	if (IS_ERR(pl2_base))
+		return PTR_ERR(pl2_base);
+
+	/* Print pL2 configs. */
+	pl2_config_read(pl2_base, cpu);
+	pl2_state->pl2_base = pl2_base;
+
+	return 0;
+}
+
+static struct platform_driver sifive_pl2_cache_driver = {
+	.driver = {
+		.name = "SiFive-pL2-cache",
+		.of_match_table = sifive_pl2_cache_of_ids,
+	},
+	.probe = sifive_pl2_cache_dev_probe,
+};
+
+static int __init sifive_pl2_cache_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state(CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE,
+				"soc/sifive/pl2:online",
+				sifive_pl2_online_cpu,
+				sifive_pl2_offline_cpu);
+	if (ret < 0) {
+		pr_err("Failed to register CPU hotplug notifier %d\n", ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&sifive_pl2_cache_driver);
+	if (ret) {
+		pr_err("Failed to register sifive_pl2_cache_driver: %d\n", ret);
+		/* Unwind the hotplug state installed above. */
+		cpuhp_remove_state(CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE);
+		return ret;
+	}
+
+	sifive_pl2_pm_init();
+
+	return 0;
+}
+
+device_initcall(sifive_pl2_cache_init);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 0f1001dca0e0..35cd5ba0030b 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -207,6 +207,7 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_AFFINITY_ONLINE,
 	CPUHP_AP_BLK_MQ_ONLINE,
 	CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
+	CPUHP_AP_RISCV_SIFIVE_PL2_ONLINE,
 	CPUHP_AP_X86_INTEL_EPB_ONLINE,
 	CPUHP_AP_PERF_ONLINE,
 	CPUHP_AP_PERF_X86_ONLINE,