@@ -374,6 +374,17 @@ static struct context_entry *device_to_existing_context_entry(
u8 bus, u8 devfn);
+/*
+ * Bookkeeping for regions mapped with ioremap_cache() while copying
+ * old-kernel (crash) memory.  iounmap() may sleep, so each mapping is
+ * queued on this list and released later by __iommu_free_mapped_mem(),
+ * outside any spin_lock_irqsave()/spin_unlock_irqrestore() section.
+ */
+struct iommu_remapped_entry {
+ struct list_head list;
+ void __iomem *mem;
+};
+static LIST_HEAD(__iommu_remapped_mem);
+static DEFINE_MUTEX(__iommu_mem_list_lock);
#endif /* CONFIG_CRASH_DUMP */
/*
@@ -4822,4 +4833,95 @@ static struct context_entry *device_to_existing_context_entry(
return ret;
}
+/*
+ * Copy memory from a physically-addressed area (old-kernel memory)
+ * into a virtually-addressed area.
+ *
+ * Returns the number of bytes copied on success, -ENOMEM on failure.
+ * NOTE(review): return type is int while @size is unsigned long --
+ * callers must keep @size within INT_MAX; confirm at call sites.
+ */
+int __iommu_load_from_oldmem(void *to, unsigned long from, unsigned long size)
+{
+ unsigned long pfn; /* Page Frame Number */
+ size_t csize = (size_t)size; /* Num(bytes to copy) */
+ unsigned long offset; /* Lower 12 bits of from */
+ void __iomem *virt_mem;
+ struct iommu_remapped_entry *mapped;
+
+ pfn = from >> VTD_PAGE_SHIFT;
+ offset = from & (~VTD_PAGE_MASK);
+
+ if (page_is_ram(pfn)) {
+ memcpy(to, pfn_to_kaddr(pfn) + offset, csize);
+ } else {
+ mapped = kzalloc(sizeof(*mapped), GFP_KERNEL);
+ if (!mapped)
+ return -ENOMEM;
+
+ virt_mem = ioremap_cache((unsigned long)from, size);
+ if (!virt_mem) {
+ kfree(mapped);
+ return -ENOMEM;
+ }
+ /* Source is __iomem: use memcpy_fromio(), not plain memcpy() */
+ memcpy_fromio(to, virt_mem, csize);
+
+ /*
+ * iounmap() may sleep; defer it to __iommu_free_mapped_mem(),
+ * which runs outside atomic context.
+ */
+ mutex_lock(&__iommu_mem_list_lock);
+ mapped->mem = virt_mem;
+ list_add_tail(&mapped->list, &__iommu_remapped_mem);
+ mutex_unlock(&__iommu_mem_list_lock);
+ }
+ return size;
+}
+
+/*
+ * Copy memory from a virtually-addressed area into a physically-addressed
+ * area (old-kernel memory).
+ *
+ * Returns the number of bytes copied on success, -ENOMEM on failure.
+ * NOTE(review): return type is int while @size is unsigned long --
+ * callers must keep @size within INT_MAX; confirm at call sites.
+ */
+int __iommu_save_to_oldmem(unsigned long to, void *from, unsigned long size)
+{
+ unsigned long pfn; /* Page Frame Number */
+ size_t csize = (size_t)size; /* Num(bytes to copy) */
+ unsigned long offset; /* Lower 12 bits of to */
+ void __iomem *virt_mem;
+ struct iommu_remapped_entry *mapped;
+
+ pfn = to >> VTD_PAGE_SHIFT;
+ offset = to & (~VTD_PAGE_MASK);
+
+ if (page_is_ram(pfn)) {
+ memcpy(pfn_to_kaddr(pfn) + offset, from, csize);
+ } else {
+ mapped = kzalloc(sizeof(*mapped), GFP_KERNEL);
+ if (!mapped)
+ return -ENOMEM;
+
+ virt_mem = ioremap_cache((unsigned long)to, size);
+ if (!virt_mem) {
+ kfree(mapped);
+ return -ENOMEM;
+ }
+ /* Destination is __iomem: use memcpy_toio(), not plain memcpy() */
+ memcpy_toio(virt_mem, from, csize);
+
+ /*
+ * iounmap() may sleep; defer it to __iommu_free_mapped_mem(),
+ * which runs outside atomic context.
+ */
+ mutex_lock(&__iommu_mem_list_lock);
+ mapped->mem = virt_mem;
+ list_add_tail(&mapped->list, &__iommu_remapped_mem);
+ mutex_unlock(&__iommu_mem_list_lock);
+ }
+ return size;
+}
+
+/*
+ * Release every mapping queued by __iommu_load_from_oldmem() /
+ * __iommu_save_to_oldmem().  Must be called from sleepable context:
+ * both mutex_lock() and iounmap() may sleep.  Always returns 0.
+ */
+int __iommu_free_mapped_mem(void)
+{
+ struct iommu_remapped_entry *mem_entry, *tmp;
+
+ mutex_lock(&__iommu_mem_list_lock);
+ /* _safe variant: entries are deleted and freed while iterating */
+ list_for_each_entry_safe(mem_entry, tmp, &__iommu_remapped_mem, list) {
+ iounmap(mem_entry->mem);
+ list_del(&mem_entry->list);
+ kfree(mem_entry);
+ }
+ mutex_unlock(&__iommu_mem_list_lock);
+ return 0;
+}
+
#endif /* CONFIG_CRASH_DUMP */
@@ -26,6 +26,7 @@
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/dma_remapping.h>
+#include <linux/crash_dump.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -368,4 +369,12 @@ extern int dmar_ir_support(void);
extern const struct attribute_group *intel_iommu_groups[];
+#ifdef CONFIG_CRASH_DUMP
+/*
+ * Helpers for copying to/from old-kernel (crash) memory; defined in
+ * intel-iommu.c.  Each returns the byte count copied or -ENOMEM.
+ */
+extern int __iommu_load_from_oldmem(void *to, unsigned long from,
+ unsigned long size);
+extern int __iommu_save_to_oldmem(unsigned long to, void *from,
+ unsigned long size);
+extern int __iommu_free_mapped_mem(void);
+#endif /* CONFIG_CRASH_DUMP */
+
#endif