
[RFC,v2,12/26] mm/asi: Function to copy page-table entries for percpu buffer

Message ID 1562855138-19507-13-git-send-email-alexandre.chartre@oracle.com (mailing list archive)
State New, archived
Series Kernel Address Space Isolation

Commit Message

Alexandre Chartre July 11, 2019, 2:25 p.m. UTC
Provide functions to copy page-table entries from the kernel page-table
to an ASI page-table for a percpu buffer. A percpu buffer has a different
VA range for each CPU, and all of them have to be copied.
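
For illustration only (not part of the patch), here is a minimal sketch of how
a caller might use the new helpers. The struct, percpu variable and function
names below are made up for this example, and the struct asi instance is
assumed to have been created elsewhere in the series:

#include <linux/percpu.h>
#include <asm/asi.h>

/* Hypothetical percpu state that must stay visible while isolation is active. */
struct my_percpu_state {
	unsigned long	flags;
	void		*buffer;
};

static DEFINE_PER_CPU_PAGE_ALIGNED(struct my_percpu_state, my_percpu_state);

static int my_asi_map_state(struct asi *asi)
{
	/*
	 * Map every CPU's copy of my_percpu_state into the ASI page-table.
	 * ASI_MAP_CPUVAR() expands to
	 * asi_map_percpu(asi, &my_percpu_state, sizeof(my_percpu_state)).
	 */
	return ASI_MAP_CPUVAR(asi, my_percpu_state);
}

static void my_asi_unmap_state(struct asi *asi)
{
	/* Remove the mapping of each CPU's copy again. */
	asi_unmap_percpu(asi, &my_percpu_state);
}

If asi_map() fails for one CPU, asi_map_percpu() unwinds any per-CPU mappings
already established before returning the error, so the caller only has to
check the return value.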

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
 arch/x86/include/asm/asi.h  |    6 ++++++
 arch/x86/mm/asi_pagetable.c |   38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+), 0 deletions(-)

Patch

diff --git a/arch/x86/include/asm/asi.h b/arch/x86/include/asm/asi.h
index 919129f..912b6a7 100644
--- a/arch/x86/include/asm/asi.h
+++ b/arch/x86/include/asm/asi.h
@@ -105,6 +105,12 @@  static inline int asi_map_module(struct asi *asi, char *module_name)
 	return asi_map(asi, module->core_layout.base, module->core_layout.size);
 }
 
+#define	ASI_MAP_CPUVAR(asi, cpuvar)	\
+	asi_map_percpu(asi, &cpuvar, sizeof(cpuvar))
+
+extern int asi_map_percpu(struct asi *asi, void *percpu_ptr, size_t size);
+extern void asi_unmap_percpu(struct asi *asi, void *percpu_ptr);
+
 /*
  * Function to exit the current isolation. This is used to abort isolation
  * when a task using isolation is scheduled out.
diff --git a/arch/x86/mm/asi_pagetable.c b/arch/x86/mm/asi_pagetable.c
index 7aee236..a4fe867 100644
--- a/arch/x86/mm/asi_pagetable.c
+++ b/arch/x86/mm/asi_pagetable.c
@@ -804,3 +804,41 @@  void asi_unmap(struct asi *asi, void *ptr)
 	spin_unlock_irqrestore(&asi->lock, flags);
 }
 EXPORT_SYMBOL(asi_unmap);
+
+void asi_unmap_percpu(struct asi *asi, void *percpu_ptr)
+{
+	void *ptr;
+	int cpu;
+
+	pr_debug("ASI %p: UNMAP PERCPU %px\n", asi, percpu_ptr);
+	for_each_possible_cpu(cpu) {
+		ptr = per_cpu_ptr(percpu_ptr, cpu);
+		pr_debug("ASI %p: UNMAP PERCPU%d %px\n", asi, cpu, ptr);
+		asi_unmap(asi, ptr);
+	}
+}
+EXPORT_SYMBOL(asi_unmap_percpu);
+
+int asi_map_percpu(struct asi *asi, void *percpu_ptr, size_t size)
+{
+	int cpu, err;
+	void *ptr;
+
+	pr_debug("ASI %p: MAP PERCPU %px\n", asi, percpu_ptr);
+	for_each_possible_cpu(cpu) {
+		ptr = per_cpu_ptr(percpu_ptr, cpu);
+		pr_debug("ASI %p: MAP PERCPU%d %px\n", asi, cpu, ptr);
+		err = asi_map(asi, ptr, size);
+		if (err) {
+			/*
+			 * Need to unmap any percpu mapping which has
+			 * succeeded before the failure.
+			 */
+			asi_unmap_percpu(asi, percpu_ptr);
+			return err;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(asi_map_percpu);
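
As a side note on why the loop walks all possible CPUs: per_cpu_ptr() resolves
a percpu pointer to a distinct kernel virtual address for each CPU, so each
copy occupies its own VA range and needs its own asi_map()/asi_unmap() call.
A simplified illustration (not from this patch) of the SMP address
computation, assuming the generic __per_cpu_offset[] array; the helper name is
made up for the example:

#include <linux/percpu.h>

/*
 * Simplified view of what per_cpu_ptr(percpu_ptr, cpu) computes on an SMP
 * kernel: the percpu base address shifted by that CPU's offset (see
 * include/asm-generic/percpu.h), hence one VA range per CPU.
 */
static void *my_percpu_addr(void *percpu_ptr, int cpu)
{
	return (void *)((unsigned long)percpu_ptr + __per_cpu_offset[cpu]);
}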