@@ -1161,7 +1161,8 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
return iov_iter_zero(min(length, end - pos), iter);
}
- if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
+ if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
+ !(iomap->flags & IOMAP_F_SHARED)))
return -EIO;
/*
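For context on the relaxed WARN_ON_ONCE() above: with reflink, a write into a shared extent is reported by the filesystem's ->iomap_begin() as two mappings, the CoW destination in @iomap (flagged IOMAP_F_SHARED) and the shared source in @srcmap, so the destination is no longer guaranteed to be IOMAP_MAPPED. A minimal sketch of how a reflink-capable filesystem might report such an extent; this is illustration only, example_extent_is_shared() is a made-up helper, and real code would also fill in ->addr, ->offset and ->length:

static int example_iomap_begin(struct inode *inode, loff_t pos,
                               loff_t length, unsigned int flags,
                               struct iomap *iomap, struct iomap *srcmap)
{
        /* Hypothetical: report a CoW write so the DAX actors copy old data. */
        if ((flags & IOMAP_WRITE) && example_extent_is_shared(inode, pos)) {
                srcmap->type = IOMAP_MAPPED;    /* old, shared blocks */
                iomap->type = IOMAP_UNWRITTEN;  /* newly allocated copy */
                iomap->flags |= IOMAP_F_SHARED; /* tell DAX to CoW */
        }
        return 0;
}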
@@ -1200,6 +1201,12 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
break;
}
+ if (iomap != srcmap) {
+ ret = dax_copy_edges(pos, length, srcmap, kaddr, false);
+ if (ret)
+ break;
+ }
+
map_len = PFN_PHYS(map_len);
kaddr += offset;
map_len -= offset;
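dax_copy_edges() is introduced by an earlier patch in this series and is not visible in this diff. Reconstructed from its call sites here, the behaviour the actors rely on is: copy whatever part of the page(s) the write does not cover from the source extent into the new destination, and the whole page when the range covers it completely (as in the fault paths below). A plausible sketch, assuming this reading; the real helper may differ in detail:

static int dax_copy_edges(loff_t pos, loff_t length, struct iomap *srcmap,
                          void *daddr, bool pmd)
{
        size_t page_size = pmd ? PMD_SIZE : PAGE_SIZE;
        loff_t offset = pos & (page_size - 1);  /* head bytes to preserve */
        loff_t end = offset + length;
        loff_t pg_end = round_up(end, page_size);
        void *saddr;
        int ret;

        /* Map the source extent so the old contents can be read. */
        ret = dax_iomap_direct_access(srcmap, pos, pg_end, &saddr, NULL);
        if (ret)
                return ret;

        if (offset == 0 && end == pg_end) {     /* fully covered: copy all */
                memcpy(daddr, saddr, length);
                return 0;
        }
        if (offset)                             /* bytes in front of the write */
                memcpy(daddr, saddr, offset);
        if (end < pg_end)                       /* bytes behind it */
                memcpy(daddr + end, saddr + end, pg_end - end);
        return 0;
}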
@@ -1311,6 +1318,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
vm_fault_t ret = 0;
void *entry;
pfn_t pfn;
+ void *kaddr;
trace_dax_pte_fault(inode, vmf, ret);
/*
@@ -1392,19 +1400,27 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
switch (iomap.type) {
case IOMAP_MAPPED:
+cow:
if (iomap.flags & IOMAP_F_NEW) {
count_vm_event(PGMAJFAULT);
count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
major = VM_FAULT_MAJOR;
}
error = dax_iomap_direct_access(&iomap, pos, PAGE_SIZE,
- NULL, &pfn);
+ &kaddr, &pfn);
if (error < 0)
goto error_finish_iomap;
entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
0, write && !sync);
+ if (srcmap.type != IOMAP_HOLE) {
+ error = dax_copy_edges(pos, PAGE_SIZE, &srcmap, kaddr,
+ false);
+ if (error)
+ goto error_finish_iomap;
+ }
+
/*
* If we are doing synchronous page fault and inode needs fsync,
* we can insert PTE into page tables only after that happens.
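The srcmap.type != IOMAP_HOLE test works because the fault handler initializes both maps to holes before calling ->iomap_begin(), so a hole in srcmap means "nothing to copy". This follows the existing convention in dax_iomap_pte_fault() and iomap_apply():

        struct iomap iomap = { .type = IOMAP_HOLE };
        struct iomap srcmap = { .type = IOMAP_HOLE };

        /*
         * ->iomap_begin() fills srcmap only when there is old data to
         * copy (a CoW write); otherwise it stays IOMAP_HOLE and the
         * dax_copy_edges() call above is skipped.
         */
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);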
@@ -1428,6 +1444,9 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
goto finish_iomap;
case IOMAP_UNWRITTEN:
+ if (write && iomap.flags & IOMAP_F_SHARED)
+ goto cow;
+ fallthrough;
case IOMAP_HOLE:
if (!write) {
ret = dax_load_hole(&xas, mapping, &entry, vmf);
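One note on reading the condition added above: && binds more loosely than bitwise &, so the test already parses as intended. Fully parenthesized it reads:

        if (write && (iomap.flags & IOMAP_F_SHARED))
                goto cow;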
@@ -1535,6 +1554,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
loff_t pos;
int error;
pfn_t pfn;
+ void *kaddr;
/*
* Check whether offset isn't beyond end of file now. Caller is
@@ -1616,14 +1636,22 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
switch (iomap.type) {
case IOMAP_MAPPED:
+cow:
error = dax_iomap_direct_access(&iomap, pos, PMD_SIZE,
- NULL, &pfn);
+ &kaddr, &pfn);
if (error < 0)
goto finish_iomap;
entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
DAX_PMD, write && !sync);
+ if (srcmap.type != IOMAP_HOLE) {
+ error = dax_copy_edges(pos, PMD_SIZE, &srcmap, kaddr,
+ true);
+ if (error)
+ goto unlock_entry;
+ }
+
/*
* If we are doing synchronous page fault and inode needs fsync,
* we can insert PMD into page tables only after that happens.
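In this path pos is PMD-aligned and the length is exactly PMD_SIZE, so there are no partial edges to preserve; with a dax_copy_edges() along the lines of the sketch earlier, the call above degenerates to a full copy of the 2 MiB page from the source:

        /* pos is PMD-aligned here, so with page_size == PMD_SIZE: */
        offset = pos & (PMD_SIZE - 1);          /* == 0 */
        end    = offset + PMD_SIZE;             /* == PMD_SIZE */
        pg_end = round_up(end, PMD_SIZE);       /* == PMD_SIZE == end */
        /* offset == 0 && end == pg_end: the whole huge page is copied */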
@@ -1642,6 +1670,9 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
result = vmf_insert_pfn_pmd(vmf, pfn, write);
break;
case IOMAP_UNWRITTEN:
+ if (write && iomap.flags & IOMAP_F_SHARED)
+ goto cow;
+ fallthrough;
case IOMAP_HOLE:
if (WARN_ON_ONCE(write))
break;
Add dax_copy_edges() to each dax actor function to perform CoW.

Signed-off-by: Shiyang Ruan <ruansy.fnst@cn.fujitsu.com>
---
 fs/dax.c | 37 ++++++++++++++++++++++++++++++++++---
 1 file changed, 34 insertions(+), 3 deletions(-)
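A worked example of the edge-copy arithmetic under the sketch above, with 4 KiB pages: a 0x100-byte write at pos 0x1f80 lands in the middle of a two-page span, and everything around it must be carried over from the source:

        /*
         * pos = 0x1f80, length = 0x100, PAGE_SIZE = 0x1000:
         *
         *   offset = 0x1f80 & 0xfff        = 0xf80  (head bytes to copy)
         *   end    = offset + length       = 0x1080
         *   pg_end = round_up(end, 0x1000) = 0x2000
         *   tail   = pg_end - end          = 0xf80  (tail bytes to copy)
         *
         * The write itself covers 0x100 bytes; the head and tail copies
         * preserve the remaining 0x1f00 bytes of the two pages.
         */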