@@ -33,7 +33,7 @@ static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
for (i = 0; i < page_num; ++i) {
- err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV));
+ err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, 0));
if (unlikely(err & VM_FAULT_ERROR))
break;
address += PAGE_SIZE;
@@ -371,8 +371,7 @@ static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- return vmf_insert_mixed(vma, vmf->address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
+ return vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, 0));
}
/* Special handling for the case of faulting in 2d tiled buffers */
@@ -468,7 +467,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
for (i = n; i > 0; i--) {
ret = vmf_insert_mixed(vma,
- vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
+ vaddr, __pfn_to_pfn_t(pfn, 0));
if (ret & VM_FAULT_ERROR)
break;
pfn += priv->usergart[fmt].stride_pfn;
@@ -924,8 +924,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
if (kaddr)
*kaddr = __va(dev_info->start + offset);
if (pfn)
- *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
- PFN_DEV);
+ *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), 0);
return (dev_sz - offset) / PAGE_SIZE;
}
@@ -1680,14 +1680,12 @@ static vm_fault_t vfio_pci_mmap_huge_fault(struct vm_fault *vmf,
break;
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
case PMD_ORDER:
- ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff,
- PFN_DEV), false);
+ ret = vmf_insert_pfn_pmd(vmf, __pfn_to_pfn_t(pfn + pgoff, 0), false);
break;
#endif
#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
case PUD_ORDER:
- ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff,
- PFN_DEV), false);
+ ret = vmf_insert_pfn_pud(vmf, __pfn_to_pfn_t(pfn + pgoff, 0), false);
break;
#endif
default:
@@ -412,7 +412,7 @@ static int cramfs_physmem_mmap(struct file *file, struct vm_area_struct *vma)
for (i = 0; i < pages && !ret; i++) {
vm_fault_t vmf;
unsigned long off = i * PAGE_SIZE;
- pfn_t pfn = phys_to_pfn_t(address + off, PFN_DEV);
+ pfn_t pfn = phys_to_pfn_t(address + off, 0);
vmf = vmf_insert_mixed(vma, vma->vm_start + off, pfn);
if (vmf & VM_FAULT_ERROR)
ret = vm_fault_to_errno(vmf, 0);
@@ -11,10 +11,8 @@
* PFN_MAP - pfn has a dynamic page mapping established by a device driver
*/
#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
-#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
-#define PFN_FLAGS_TRACE \
- { PFN_DEV, "DEV" }
+#define PFN_FLAGS_TRACE { }
static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
{
@@ -2510,9 +2510,9 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
if (!pfn_modify_allowed(pfn, pgprot))
return VM_FAULT_SIGBUS;
- track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
+ track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, 0));
- return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+ return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, 0), pgprot,
false);
}
EXPORT_SYMBOL(vmf_insert_pfn_prot);

PFN_DEV is used by callers of dax_direct_access() to figure out if the
returned PFN is associated with a page or not. However all DAX PFNs now
require an associated ZONE_DEVICE page, so callers can assume a page
exists.

Other users of PFN_DEV were setting it before calling
vmf_insert_mixed(). This is unnecessary as the flag is no longer
checked; vmf_insert_mixed() instead relies on pfn_valid() to determine
whether the pfn has an associated page.

Signed-off-by: Alistair Popple <apopple@nvidia.com>
---
 drivers/gpu/drm/gma500/fbdev.c     | 2 +-
 drivers/gpu/drm/omapdrm/omap_gem.c | 5 ++---
 drivers/s390/block/dcssblk.c       | 3 +--
 drivers/vfio/pci/vfio_pci_core.c   | 6 ++----
 fs/cramfs/inode.c                  | 2 +-
 include/linux/pfn_t.h              | 4 +---
 mm/memory.c                        | 4 ++--
 7 files changed, 10 insertions(+), 16 deletions(-)
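
For reviewers, a minimal sketch of the invariant this conversion relies
on. The helper below is hypothetical and not part of the patch; it only
illustrates that, with PFN_DEV gone, "does this pfn have a struct page?"
is answered from the raw pfn itself rather than from a flag stashed in
the pfn_t:

#include <linux/pfn_t.h>

/*
 * Sketch only -- hypothetical helper, not introduced by this series.
 * A struct page exists for a pfn exactly when pfn_valid() says so,
 * which is the information PFN_DEV used to carry out of band.
 */
static inline bool sketch_pfn_has_page(pfn_t pfn)
{
	return pfn_valid(pfn_t_to_pfn(pfn));
}

In other words, vmf_insert_mixed() can make the page/no-page decision
from the pfn alone, and dax_direct_access() callers can assume the
returned pfn is always backed by a ZONE_DEVICE struct page.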