@@ -9,3 +9,4 @@ ifdef CONFIG_INTEL_IOMMU
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
endif
obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
+obj-$(CONFIG_KEXEC_KHO) += serialise.o
@@ -65,6 +65,7 @@ static int rwbf_quirk;
static int force_on = 0;
static int intel_iommu_tboot_noforce;
static int no_platform_optin;
+DEFINE_XARRAY(persistent_domains); /* persistent_id -> struct iommu_domain *, filled in domain_alloc_user */
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
@@ -3393,6 +3394,10 @@ static __init int tboot_force_iommu(void)
return 1;
}
+static struct notifier_block serialise_kho_nb = {
+	.notifier_call = intel_iommu_serialise_kho, /* KHO serialise callback */
+};
+
int __init intel_iommu_init(void)
{
int ret = -ENODEV;
@@ -3432,6 +3437,12 @@ int __init intel_iommu_init(void)
if (!no_iommu)
intel_iommu_debugfs_init();
+ if (IS_ENABLED(CONFIG_KEXEC_KHO)) {
+ ret = register_kho_notifier(&serialise_kho_nb);
+ if (ret)
+ goto out_free_dmar;
+ }
+
if (no_iommu || dmar_disabled) {
/*
* We exit the function here to ensure IOMMU's remapping and
@@ -3738,6 +3749,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
struct intel_iommu *iommu = info->iommu;
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
+ int rc;
/* Must be NESTING domain */
if (parent) {
@@ -3778,6 +3790,12 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
domain->dirty_ops = &intel_dirty_ops;
}
+ if (persistent_id) {
+ rc = xa_insert(&persistent_domains, persistent_id, domain, GFP_KERNEL_ACCOUNT);
+ if (rc)
+ pr_warn("Unable to track persistent domain %lu\n", persistent_id);
+ }
+
return domain;
}
@@ -11,6 +11,7 @@
#define _INTEL_IOMMU_H_
#include <linux/types.h>
+#include <linux/kexec.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
@@ -496,6 +497,7 @@ struct q_inval {
#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
struct dmar_pci_notify_info;
+extern struct xarray persistent_domains;
#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
@@ -1225,6 +1227,22 @@ static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
#define intel_iommu_sm (0)
#endif
+#ifdef CONFIG_KEXEC_KHO
+int intel_iommu_serialise_kho(struct notifier_block *self, unsigned long cmd, void *fdt);
+int __init intel_iommu_deserialise_kho(void);
+#else
+/* Stubs must be static inline: plain definitions in a header get duplicated in every TU and break the link. */
+static inline int intel_iommu_serialise_kho(struct notifier_block *self,
+					    unsigned long cmd, void *fdt)
+{
+	return 0;
+}
+static inline int intel_iommu_deserialise_kho(void)
+{
+	return 0;
+}
+#endif /* CONFIG_KEXEC_KHO */
+
static inline const char *decode_prq_descriptor(char *str, size_t size,
u64 dw0, u64 dw1, u64 dw2, u64 dw3)
{
new file mode 100644
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "iommu.h"
+
+static int serialise_domain(void *fdt, struct iommu_domain *domain)
+{
+	return 0;	/* TODO: emit this domain's state into the KHO FDT */
+}
+
+int intel_iommu_serialise_kho(struct notifier_block *self, unsigned long cmd,
+			      void *fdt)
+{
+	static const char compatible[] = "intel-iommu-v0";
+	struct iommu_domain *domain;
+	unsigned long xa_idx;
+	int err = 0;
+
+	switch (cmd) {
+	case KEXEC_KHO_ABORT:
+		/* No partial state is kept yet, so there is nothing to roll back. */
+		return NOTIFY_DONE;
+	case KEXEC_KHO_DUMP:
+		err |= fdt_begin_node(fdt, "intel-iommu");
+		err |= fdt_property(fdt, "compatible", compatible,
+				    sizeof(compatible));
+		err |= fdt_begin_node(fdt, "domains");
+		xa_for_each(&persistent_domains, xa_idx, domain)
+			err |= serialise_domain(fdt, domain);
+		err |= fdt_end_node(fdt); /* domains */
+		err |= fdt_end_node(fdt); /* intel-iommu */
+		return err ? NOTIFY_BAD : NOTIFY_DONE;
+	default:
+		return NOTIFY_BAD;
+	}
+}
+
+int __init intel_iommu_deserialise_kho(void)
+{
+	return 0;	/* TODO: rebuild persistent domains from the KHO FDT */
+}