[RFC,v2,1/3] crash: export dev memmap header to vmcoreinfo

Message ID: 20230427101838.12267-2-lizhijian@fujitsu.com
State: Superseded
Series: pmem memmap dump support

Commit Message

Zhijian Li (Fujitsu) April 27, 2023, 10:18 a.m. UTC
Introduce a symbol and export it to vmcoreinfo. With this symbol,
dumping applications such as makedumpfile are able to reconstruct the
linked list that describes the memmap regions located on a device.

With this mechanism, nvdimm/pmem, which allows placing the memmap (page
array) on the device, is able to export that memmap to the kdump kernel
via vmcoreinfo.
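
As an illustration only (the actual call sites are wired up by the
following patches of this series), a driver that places its memmap on
the device could use the new helpers roughly as below; the function
names and the dev_pagemap-based match cookie are hypothetical:

	#include <linux/crash_core.h>
	#include <linux/memremap.h>

	/* Hypothetical: called once the device-hosted memmap range is known. */
	static void example_register_dev_memmap(struct dev_pagemap *pgmap,
						u64 start_pfn, u64 npfn,
						bool memmap_on_device)
	{
		/*
		 * 'pgmap' only serves as an opaque match cookie: any previous
		 * record for it is dropped, and [start_pfn, start_pfn + npfn)
		 * is recorded again only when the memmap really sits on the
		 * device.
		 */
		devm_memmap_vmcore_update(pgmap, start_pfn, npfn,
					  memmap_on_device);
	}

	/* Hypothetical: called on teardown so stale regions are not exported. */
	static void example_unregister_dev_memmap(struct dev_pagemap *pgmap)
	{
		devm_memmap_vmcore_delete(pgmap);
	}

Because devm_memmap_vmcore_update()/_delete() fall back to no-op macros
when CONFIG_CRASH_CORE is not set, such callers do not need any #ifdef
guards.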

CC: Baoquan He <bhe@redhat.com>
CC: Vivek Goyal <vgoyal@redhat.com>
CC: Dave Young <dyoung@redhat.com>
Signed-off-by: Li Zhijian <lizhijian@fujitsu.com>
---
 include/linux/crash_core.h |  8 +++++
 kernel/crash_core.c        | 61 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 69 insertions(+)

Patch

diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index de62a722431e..05ec2777f4fd 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -84,4 +84,12 @@  int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
 int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
 		unsigned long long *crash_size, unsigned long long *crash_base);
 
+#ifdef CONFIG_CRASH_CORE
+void devm_memmap_vmcore_delete(void *match);
+void devm_memmap_vmcore_update(void *match, u64 pfn, u64 npfn, bool dev);
+#else
+#define devm_memmap_vmcore_delete(match) do {} while (0)
+#define devm_memmap_vmcore_update(match, pfn, npfn, dev) do {} while (0)
+#endif
+
 #endif /* LINUX_CRASH_CORE_H */
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 755f5f08ab38..f28cbd98f28b 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -401,6 +401,61 @@  phys_addr_t __weak paddr_vmcoreinfo_note(void)
 }
 EXPORT_SYMBOL(paddr_vmcoreinfo_note);
 
+struct devm_memmap_vmcore {
+	struct list_head entry;
+	unsigned long start;
+	unsigned long end;
+	void *match;
+};
+
+static struct devm_memmap_vmcore devm_memmap_vmcore_head = {
+	.entry = LIST_HEAD_INIT(devm_memmap_vmcore_head.entry),
+};
+static DEFINE_MUTEX(devm_memmap_vmcore_mutex);
+
+static void devm_memmap_vmcore_add(void *match, u64 pfn, u64 npfn)
+{
+	struct devm_memmap_vmcore *metadata;
+
+	metadata = kzalloc(sizeof(*metadata), GFP_KERNEL);
+	if (!metadata) {
+		pr_err("Not enough memory\n");
+		return;
+	}
+
+	metadata->start = pfn;
+	metadata->end = pfn + npfn;
+	metadata->match = match;
+
+	mutex_lock(&devm_memmap_vmcore_mutex);
+	list_add(&metadata->entry, &devm_memmap_vmcore_head.entry);
+	mutex_unlock(&devm_memmap_vmcore_mutex);
+}
+
+void devm_memmap_vmcore_delete(void *match)
+{
+	struct devm_memmap_vmcore *metadata;
+
+	mutex_lock(&devm_memmap_vmcore_mutex);
+	list_for_each_entry(metadata, &devm_memmap_vmcore_head.entry, entry) {
+		if (metadata->match == match) {
+			list_del(&metadata->entry);
+			kfree(metadata);
+			break;
+		}
+	}
+	mutex_unlock(&devm_memmap_vmcore_mutex);
+}
+EXPORT_SYMBOL_GPL(devm_memmap_vmcore_delete);
+
+void devm_memmap_vmcore_update(void *match, u64 start_pfn, u64 npfn, bool dev)
+{
+	devm_memmap_vmcore_delete(match);
+	if (dev)
+		devm_memmap_vmcore_add(match, start_pfn, npfn);
+}
+EXPORT_SYMBOL_GPL(devm_memmap_vmcore_update);
+
 static int __init crash_save_vmcoreinfo_init(void)
 {
 	vmcoreinfo_data = (unsigned char *)get_zeroed_page(GFP_KERNEL);
@@ -436,6 +491,12 @@  static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_SYMBOL(contig_page_data);
 #endif
 #ifdef CONFIG_SPARSEMEM
+	VMCOREINFO_SYMBOL(devm_memmap_vmcore_head);
+	VMCOREINFO_STRUCT_SIZE(devm_memmap_vmcore);
+	VMCOREINFO_OFFSET(devm_memmap_vmcore, entry);
+	VMCOREINFO_OFFSET(devm_memmap_vmcore, start);
+	VMCOREINFO_OFFSET(devm_memmap_vmcore, end);
+
 	VMCOREINFO_SYMBOL_ARRAY(mem_section);
 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
 	VMCOREINFO_STRUCT_SIZE(mem_section);
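
For reference, the entries added above end up in the vmcoreinfo note as
plain "KEY=value" lines. The excerpt below is illustrative only: the
symbol address is arbitrary, and the size/offsets are what an x86_64
build would typically produce for this structure layout:

	SYMBOL(devm_memmap_vmcore_head)=ffffffff82a1b3c0
	SIZE(devm_memmap_vmcore)=40
	OFFSET(devm_memmap_vmcore.entry)=0
	OFFSET(devm_memmap_vmcore.start)=16
	OFFSET(devm_memmap_vmcore.end)=24

With these, a dump tool such as makedumpfile can start at
devm_memmap_vmcore_head, follow the embedded list_head through the old
kernel's memory in /proc/vmcore, and read the start/end PFNs of every
device-hosted memmap region, even though those page arrays do not live
in ordinary RAM.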