@@ -250,6 +250,8 @@ struct vm_operations_struct {
/* notification that a previously read-only page is about to become
* writable, if an error is returned it will cause a SIGBUS */
int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
+ /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
+ int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
/* called by access_process_vm when get_user_pages() fails, typically
* for use by special VMAs that can switch between memory and hardware
@@ -1982,6 +1982,22 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page,
return ret;
}
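+/*
+ * Notify the driver that a pure-pfn page (VM_PFNMAP|VM_MIXEDMAP) is about
+ * to become writable; unlike do_page_mkwrite() the vm_fault carries no
+ * struct page, only the faulting offset and address.
+ */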
+static int do_pfn_mkwrite(struct vm_area_struct *vma, unsigned long address)
+{
+ struct vm_fault vmf;
+
+ if (!vma->vm_ops || !vma->vm_ops->pfn_mkwrite)
+ return 0;
+
+ vmf.page = NULL;
+ vmf.pgoff = (((address & PAGE_MASK) - vma->vm_start) >> PAGE_SHIFT) +
+ vma->vm_pgoff;
+ vmf.virtual_address = (void __user *)(address & PAGE_MASK);
+ vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+
+ return vma->vm_ops->pfn_mkwrite(vma, &vmf);
+}
+
/*
* This routine handles present pages, when users try to write
* to a shared page. It is done by copying the page to a new address
@@ -2025,8 +2041,17 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
* accounting on raw pfn maps.
*/
if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
- (VM_WRITE|VM_SHARED))
+ (VM_WRITE|VM_SHARED)) {
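+ /*
+ * The driver's ->pfn_mkwrite handler may sleep, so drop the pte
+ * lock around the call and revalidate the pte once it is re-taken.
+ */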
+ pte_unmap_unlock(page_table, ptl);
+ ret = do_pfn_mkwrite(vma, address);
+ if (ret & VM_FAULT_ERROR)
+ return ret;
+ page_table = pte_offset_map_lock(mm, pmd, address,
+ &ptl);
+ if (!pte_same(*page_table, orig_pte))
+ goto unlock;
goto reuse;
+ }
goto gotten;
}
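For illustration, a driver that exports device memory with remap_pfn_range()
could hook the new callback roughly as follows. This is a minimal sketch,
assuming a hypothetical example_dev driver that keeps its state in
vma->vm_private_data; example_dev, example_mark_dirty() and example_fault()
are made-up names, not part of the patch.

static int example_pfn_mkwrite(struct vm_area_struct *vma,
			       struct vm_fault *vmf)
{
	struct example_dev *edev = vma->vm_private_data;

	/* vmf->page is NULL for pfn mappings; the offset is in vmf->pgoff */
	if (vmf->pgoff >= edev->nr_pages)
		return VM_FAULT_SIGBUS;

	/* note that this pfn is about to be written through the mapping */
	example_mark_dirty(edev, vmf->pgoff);

	/* returning 0 lets do_wp_page() proceed and make the pte writable */
	return 0;
}

static const struct vm_operations_struct example_vm_ops = {
	.fault		= example_fault,
	.pfn_mkwrite	= example_pfn_mkwrite,
};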