@@ -500,8 +500,6 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
 	vm_fault_t ret;
 	pgoff_t fault_page_size;
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
-	bool is_cow_mapping =
-		(vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 
 	switch (pe_size) {
 	case PE_SIZE_PMD:
@@ -518,7 +516,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
 	}
 
 	/* Always do write dirty-tracking and COW on PTE level. */
-	if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping))
+	if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping(vma->vm_flags)))
 		return VM_FAULT_FALLBACK;
 
 	ret = ttm_bo_vm_reserve(bo, vmf);
@@ -49,7 +49,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_ops = &vmw_vm_ops;
 
 	/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
-	if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE)
+	if (!is_cow_mapping(vma->vm_flags))
 		vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
 
 	return 0;
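
Every open-coded flag test above is replaced by one shared predicate. Its definition is not part of this excerpt; as a sketch, assuming it lives alongside the other vm_flags helpers in include/linux/mm.h, it would read:

static inline bool is_cow_mapping(vm_flags_t flags)
{
	/*
	 * A private mapping that may gain write permission (VM_MAYWRITE set,
	 * VM_SHARED clear) is copy-on-write: writes must land in anonymous
	 * copies, never in the backing object.
	 */
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}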
@@ -1036,8 +1036,6 @@ struct clear_refs_private {
 #ifdef CONFIG_MEM_SOFT_DIRTY
 
-#define is_cow_mapping(flags) (((flags) & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE)
-
 static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 {
 	struct page *page;
@@ -3731,15 +3731,13 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	pte_t *src_pte, *dst_pte, entry, dst_entry;
 	struct page *ptepage;
 	unsigned long addr;
-	int cow;
+	bool cow = is_cow_mapping(vma->vm_flags);
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	struct address_space *mapping = vma->vm_file->f_mapping;
 	struct mmu_notifier_range range;
 	int ret = 0;
 
-	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
-
 	if (cow) {
 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
 					vma->vm_start,
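
For context on when the predicate fires, here is a minimal userspace sketch (file name and length are illustrative, not part of the patch): a MAP_PRIVATE mapping created writable carries VM_MAYWRITE but not VM_SHARED, so is_cow_mapping() is true for it, while a MAP_SHARED mapping of the same file is not a COW mapping.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;
	int fd = open("example.bin", O_RDWR | O_CREAT, 0600);	/* illustrative file */

	if (fd < 0)
		return 1;
	ftruncate(fd, len);

	/* COW mapping: private and writable -> VM_MAYWRITE without VM_SHARED */
	void *cow = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);

	/* Not COW: VM_SHARED is set, writes reach the backing file */
	void *shared = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	munmap(cow, len);
	munmap(shared, len);
	close(fd);
	return 0;
}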