@@ -301,7 +301,8 @@ struct vm_fault {
 					 * the 'address' */
 	pte_t orig_pte;			/* Value of PTE at the time of fault */
-	struct page *cow_page;		/* Handler may choose to COW */
+	struct page *cow_page;		/* Page handler may use for COW fault */
+	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
 	struct page *page;		/* ->fault handlers should return a
 					 * page here, unless VM_FAULT_NOPAGE
 					 * is set (which is also implied by
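
Both new fields are filled in by the generic COW path before ->fault() is invoked: do_cow_fault() allocates the destination page and pre-charges it to a memory cgroup, so a handler that wants to finish the fault itself knows both where to copy the data and which cgroup the charge belongs to. Roughly, the setup looks like this (a slightly simplified sketch of do_cow_fault(); see also the last hunk below):

	struct page *new_page;
	struct mem_cgroup *memcg;

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
	if (!new_page)
		return VM_FAULT_OOM;

	/* Pre-charge the not-yet-mapped page; whoever maps it commits the
	 * charge, an aborted fault cancels it. */
	if (mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
				  &memcg, false)) {
		put_page(new_page);
		return VM_FAULT_OOM;
	}

	vmf->cow_page = new_page;
	vmf->memcg = memcg;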
@@ -1103,6 +1104,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
 #define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
 #define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
 #define VM_FAULT_DAX_LOCKED 0x1000	/* ->fault has locked DAX entry */
+#define VM_FAULT_DONE_COW   0x2000	/* ->fault has fully handled COW */
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
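
With the flag in place, a ->fault() handler can complete the whole copy-on-write itself: fill vmf->cow_page, map it, and return VM_FAULT_DONE_COW so the generic code skips its own copy-and-map step. A minimal sketch of such a branch, assuming a helper in the spirit of the finish_fault() helper introduced around this series, which maps vmf->cow_page and commits the vmf->memcg charge; this is an illustration, not code from the patch:

	/* Hypothetical branch in a filesystem ->fault() handler, hit on a
	 * write fault against a private mapping: the core has already
	 * allocated vmf->cow_page and pre-charged it to vmf->memcg. */
	if (vmf->cow_page) {
		int ret;

		/* Fill the COW destination directly, e.g. zero it for a hole. */
		clear_user_highpage(vmf->cow_page, vmf->address);
		__SetPageUptodate(vmf->cow_page);

		/* Assumed helper: installs the PTE for cow_page and commits
		 * the memcg charge carried in vmf->memcg. */
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		return ret;
	}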
@@ -2848,9 +2848,8 @@ static int __do_fault(struct vm_fault *vmf)
 	int ret;
 
 	ret = vma->vm_ops->fault(vma, vmf);
-	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
-		return ret;
-	if (ret & VM_FAULT_DAX_LOCKED)
+	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
+			    VM_FAULT_DAX_LOCKED | VM_FAULT_DONE_COW)))
 		return ret;
 
 	if (unlikely(PageHWPoison(vmf->page))) {
@@ -3209,9 +3208,12 @@ static int do_cow_fault(struct vm_fault *vmf)
 	}
 
 	vmf->cow_page = new_page;
+	vmf->memcg = memcg;
 
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
+	if (ret & VM_FAULT_DONE_COW)
+		return ret;
 	if (!(ret & VM_FAULT_DAX_LOCKED))
 		copy_user_highpage(new_page, vmf->page, vmf->address, vma);
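
The early return on VM_FAULT_DONE_COW matters for the charge taken earlier: once the handler has mapped cow_page, the memcg charge has been committed on its side, so do_cow_fault() must neither redo the copy and map steps below nor fall into its error path, which cancels the charge. For reference, that error path looks roughly like this (a sketch of the function's existing cleanup label, not part of this hunk):

	uncharge_out:
		/* The fault failed before the COW page was mapped: give back
		 * the pre-charged memcg accounting and drop our reference. */
		mem_cgroup_cancel_charge(new_page, memcg, false);
		put_page(new_page);
		return ret;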