@@ -6,6 +6,18 @@ config CACHE_COHERENCY_CLASS
help
Class to which coherency control drivers register allowing core kernel
subsystems to issue invalidations and similar coherency operations.
+if CACHE_COHERENCY_CLASS
+
+config HISI_SOC_HHA
+ tristate "HiSilicon Hydra Home Agent (HHA) device driver"
+ depends on ARM64 && ACPI || COMPILE_TEST
+ help
+	  The Hydra Home Agent (HHA) is responsible for cache coherency
+	  on the SoC. This driver provides cache maintenance functions of HHA.
+
+ This driver can be built as a module. If so, the module will be
+ called hisi_soc_hha.
+endif
config AX45MP_L2_CACHE
bool "Andes Technology AX45MP L2 Cache controller"
@@ -5,3 +5,4 @@ obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
obj-$(CONFIG_STARFIVE_STARLINK_CACHE) += starfive_starlink_cache.o
obj-$(CONFIG_CACHE_COHERENCY_CLASS) += coherency_core.o
+obj-$(CONFIG_HISI_SOC_HHA) += hisi_soc_hha.o
new file mode 100644
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Hydra Home Agent (HHA).
+ *
+ * Copyright (c) 2024 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ * Yushan Wang <wangyushan12@huawei.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitfield.h>
+#include <linux/cacheflush.h>
+#include <linux/cache_coherency.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+/* HHA maintenance registers, offsets from the MMIO base */
+#define HISI_HHA_CTRL		0x5004
+/* Set by software to start an operation; cleared by hardware on completion */
+#define HISI_HHA_CTRL_EN	BIT(0)
+/* Operate on the address range programmed via the START/LEN registers */
+#define HISI_HHA_CTRL_RANGE	BIT(1)
+/* Maintenance operation type; 1 == Clean Invalid */
+#define HISI_HHA_CTRL_TYPE	GENMASK(3, 2)
+#define HISI_HHA_START_L	0x5008
+#define HISI_HHA_START_H	0x500c
+#define HISI_HHA_LEN_L		0x5010
+#define HISI_HHA_LEN_H		0x5014
+
+/* The maintain operation performs in a 128 Byte granularity */
+#define HISI_HHA_MAINT_ALIGN	128
+
+/* Poll interval and timeout while waiting for HISI_HHA_CTRL_EN to clear */
+#define HISI_HHA_POLL_GAP_US	10
+#define HISI_HHA_POLL_TIMEOUT_US	50000
+
+/* Per-instance state of one HHA maintenance interface. */
+struct hisi_soc_hha {
+	/* Registration handle for the cache coherency class core. */
+	struct cache_coherency_device ccd;
+	/* Locks HHA instance to forbid overlapping access. */
+	spinlock_t lock;
+	/* MMIO base of the HHA register region (shared with the HHA PMU driver). */
+	void __iomem *base;
+};
+
+/*
+ * Poll HISI_HHA_CTRL until the hardware clears the EN bit, i.e. the
+ * in-flight maintenance operation (if any) has finished.
+ *
+ * Returns true on completion, false if the poll timed out.
+ */
+static bool hisi_hha_cache_maintain_wait_finished(struct hisi_soc_hha *soc_hha)
+{
+	u32 ctrl;
+	int err;
+
+	err = readl_poll_timeout_atomic(soc_hha->base + HISI_HHA_CTRL, ctrl,
+					!(ctrl & HISI_HHA_CTRL_EN),
+					HISI_HHA_POLL_GAP_US,
+					HISI_HHA_POLL_TIMEOUT_US);
+	return err == 0;
+}
+
+/*
+ * Kick off a "Clean Invalid" maintenance operation on [addr, addr + size).
+ *
+ * Returns 0 once the operation has been started, -EINVAL for a zero or
+ * misaligned size, or -EBUSY if a previous operation is still in flight.
+ * Completion is checked separately via hisi_hha_cache_poll_maintain_done().
+ */
+static int hisi_hha_cache_do_maintain(struct hisi_soc_hha *soc_hha,
+				      phys_addr_t addr, size_t size)
+{
+	u32 reg;
+
+	/* NOTE(review): addr alignment is not validated here — confirm the
+	 * hardware tolerates a start address that is not 128B aligned.
+	 */
+	if (!size || !IS_ALIGNED(size, HISI_HHA_MAINT_ALIGN))
+		return -EINVAL;
+
+	/*
+	 * Hardware maintains the range [addr, addr + size + HISI_HHA_MAINT_ALIGN],
+	 * so program one granule less than the requested length.
+	 */
+	size -= HISI_HHA_MAINT_ALIGN;
+
+	guard(spinlock)(&soc_hha->lock);
+
+	if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+		return -EBUSY;
+
+	writel(lower_32_bits(addr), soc_hha->base + HISI_HHA_START_L);
+	writel(upper_32_bits(addr), soc_hha->base + HISI_HHA_START_H);
+	writel(lower_32_bits(size), soc_hha->base + HISI_HHA_LEN_L);
+	writel(upper_32_bits(size), soc_hha->base + HISI_HHA_LEN_H);
+
+	reg = FIELD_PREP(HISI_HHA_CTRL_TYPE, 1); /* Clean Invalid */
+	reg |= HISI_HHA_CTRL_RANGE | HISI_HHA_CTRL_EN;
+	writel(reg, soc_hha->base + HISI_HHA_CTRL);
+
+	return 0;
+}
+
+/*
+ * Wait for the previously started maintenance operation to complete.
+ * @addr and @size are accepted for interface symmetry but unused.
+ *
+ * Returns 0 on completion, -ETIMEDOUT if the hardware did not finish
+ * within the poll timeout.
+ */
+static int hisi_hha_cache_poll_maintain_done(struct hisi_soc_hha *soc_hha,
+					     phys_addr_t addr, size_t size)
+{
+	guard(spinlock)(&soc_hha->lock);
+
+	return hisi_hha_cache_maintain_wait_finished(soc_hha) ? 0 : -ETIMEDOUT;
+}
+
+/* coherency_ops.wbinv callback: start maintenance over the requested range. */
+static int hisi_soc_hha_wbinv(struct cache_coherency_device *ccd, struct cc_inval_params *invp)
+{
+	struct hisi_soc_hha *soc_hha;
+
+	soc_hha = container_of(ccd, struct hisi_soc_hha, ccd);
+	return hisi_hha_cache_do_maintain(soc_hha, invp->addr, invp->size);
+}
+
+/* coherency_ops.done callback: wait for the current operation to finish. */
+static int hisi_soc_hha_done(struct cache_coherency_device *ccd)
+{
+	struct hisi_soc_hha *soc_hha;
+
+	soc_hha = container_of(ccd, struct hisi_soc_hha, ccd);
+	return hisi_hha_cache_poll_maintain_done(soc_hha, 0, 0);
+}
+
+/* Callbacks registered with the cache coherency class core. */
+static const struct coherency_ops hha_ops = {
+	.wbinv = hisi_soc_hha_wbinv,
+	.done = hisi_soc_hha_done,
+};
+
+/* Scope-based cleanup: drop the ccd reference if probe bails out early. */
+DEFINE_FREE(hisi_soc_hha, struct hisi_soc_hha *, if (_T) cache_coherency_device_put(&_T->ccd))
+
+/*
+ * Probe one HHA instance: map its registers and register it with the
+ * cache coherency core. drvdata is only set once probe can no longer fail.
+ */
+static int hisi_soc_hha_probe(struct platform_device *pdev)
+{
+	struct resource *mem;
+	int ret;
+
+	struct hisi_soc_hha *soc_hha __free(hisi_soc_hha) =
+		cache_coherency_alloc_device(&pdev->dev, &hha_ops,
+					     struct hisi_soc_hha, ccd);
+	if (!soc_hha)
+		return -ENOMEM;
+
+	spin_lock_init(&soc_hha->lock);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem)
+		return -ENODEV;
+
+	/*
+	 * The HHA cache driver shares this register region with the HHA
+	 * uncore PMU driver, so neither may claim the resource exclusively.
+	 * Use plain ioremap() instead of devm_ioremap_resource() to skip
+	 * the exclusive request and let both drivers coexist.
+	 */
+	soc_hha->base = ioremap(mem->start, resource_size(mem));
+	if (!soc_hha->base)
+		return dev_err_probe(&pdev->dev, -ENOMEM,
+				     "failed to remap io memory");
+
+	ret = cache_coherency_device_register(&soc_hha->ccd);
+	if (ret) {
+		iounmap(soc_hha->base);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, no_free_ptr(soc_hha));
+	return 0;
+}
+
+/*
+ * Tear down in reverse order of probe: unregister from the coherency
+ * core so no new operations arrive, unmap the registers, then drop
+ * this driver's reference on the coherency device.
+ */
+static void hisi_soc_hha_remove(struct platform_device *pdev)
+{
+	struct hisi_soc_hha *soc_hha = platform_get_drvdata(pdev);
+
+	cache_coherency_device_unregister(&soc_hha->ccd);
+	iounmap(soc_hha->base);
+	cache_coherency_device_put(&soc_hha->ccd);
+}
+
+/* ACPI IDs this driver binds to. */
+static const struct acpi_device_id hisi_soc_hha_ids[] = {
+	{ "HISI0511", },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_soc_hha_ids);
+
+static struct platform_driver hisi_soc_hha_driver = {
+	.driver = {
+		.name = "hisi_soc_hha",
+		.acpi_match_table = hisi_soc_hha_ids,
+	},
+	.probe = hisi_soc_hha_probe,
+	.remove = hisi_soc_hha_remove,
+};
+
+module_platform_driver(hisi_soc_hha_driver);
+
+MODULE_IMPORT_NS("CACHE_COHERENCY");
+/* "HiSilicon" capitalized to match the copyright header. */
+MODULE_DESCRIPTION("HiSilicon Hydra Home Agent driver supporting cache maintenance");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_AUTHOR("Yushan Wang <wangyushan12@huawei.com>");
+MODULE_LICENSE("GPL");