@@ -2290,11 +2290,7 @@ static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
return 0;
}
 
-/*
- * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
- * must have pre-validated the caching bits of the pgprot_t.
- */
-int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+static int remap_pfn_range_internal(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
pgd_t *pgd;
@@ -2347,6 +2343,27 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
return 0;
}
 
+/*
+ * Variant of remap_pfn_range that does not call track_pfn_remap. The caller
+ * must have pre-validated the caching bits of the pgprot_t.
+ */
+int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ int error = remap_pfn_range_internal(vma, addr, pfn, size, prot);
+
+ if (!error)
+ return 0;
+
+ /*
+	 * A partial pfn range mapping is dangerous: it does not maintain
+	 * page reference counts, and the caller may free the still-mapped
+	 * pages in response to the error. So zap it early.
+ */
+ zap_page_range_single(vma, addr, size, NULL);
+ return error;
+}
+
/**
* remap_pfn_range - remap kernel memory to userspace
* @vma: user vma to map to
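
For context, a minimal sketch of the kind of caller this protects, assuming a
character device ->mmap handler that maps a physically contiguous buffer with
remap_pfn_range() (which ends up in remap_pfn_range_notrack()); the mydev_*
names and the buffer base/size constants below are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

#define MYDEV_BUF_PHYS	0x90000000UL	/* hypothetical buffer base */
#define MYDEV_BUF_SIZE	(1UL << 20)	/* hypothetical buffer size */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if (len > MYDEV_BUF_SIZE || vma->vm_pgoff)
		return -EINVAL;

	/*
	 * If remap_pfn_range() fails partway through, the partial
	 * mapping is now zapped before the error is returned, so
	 * simply propagating the error is enough.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       MYDEV_BUF_PHYS >> PAGE_SHIFT,
			       len, vma->vm_page_prot);
}

static const struct file_operations mydev_fops = {
	.owner	= THIS_MODULE,
	.mmap	= mydev_mmap,
};

With the zap in the error path, a partially populated mapping is torn down
before the error reaches such a handler, so it can simply propagate the
failure and later free its buffer without leaving stale user PTEs behind.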