@@ -216,6 +216,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
case -EIO: /* shmemfs failure from swap device */
case -EFAULT: /* purged object */
case -ENODEV: /* bad object, how did you get here! */
+ case -ENXIO: /* unable to access backing store (on device) */
return VM_FAULT_SIGBUS;
case -ENOSPC: /* shmemfs allocation failure */
case -ENOMEM: /* our allocation failure */
return VM_FAULT_OOM;
case 0:
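
ENXIO is what the backing-store paths return when an on-device (iomem) object cannot be reached, which is why it belongs in the SIGBUS group above rather than the OOM group. For orientation, a minimal sketch of how the helper is consumed; the handler and helper names are illustrative, not part of this patch:

static vm_fault_t vm_fault_sketch(struct vm_fault *vmf)
{
	int err;

	/* hypothetical backing-store work; may fail with -ENXIO */
	err = pin_and_map_backing_store(vmf);

	/* translate the errno into a VM_FAULT_* code for the core MM */
	return i915_error_to_vmf_fault(err);
}
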
@@ -236,11 +237,10 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
struct vm_area_struct *area = vmf->vma;
struct i915_mmap_offset *mmo = area->vm_private_data;
struct drm_i915_gem_object *obj = mmo->obj;
+ bool use_dma =
+ !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
int err;
- if (unlikely(!i915_gem_object_has_struct_page(obj)))
- return VM_FAULT_SIGBUS;
-
/* Sanity check that we allow writing into this object */
if (unlikely(i915_gem_object_is_readonly(obj) &&
area->vm_flags & VM_WRITE))
@@ -251,9 +251,9 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
goto out;
/* PTEs are revoked in obj->ops->put_pages() */
- err = remap_io_sg_page(area,
- area->vm_start, area->vm_end - area->vm_start,
- obj->mm.pages->sgl);
+ err = remap_io_sg(area,
+ area->vm_start, area->vm_end - area->vm_start,
+ obj->mm.pages->sgl, use_dma);
if (area->vm_flags & VM_WRITE) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
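
With the early SIGBUS return gone, objects without struct pages are now remapped instead of rejected. A comment-form summary of the decision this hunk encodes, with the pfn math taken from the sgt_pfn() hunk further down:

/*
 * object backing        use_dma   PTE source (see remap_io_sg below)
 * ------------------    -------   ----------------------------------
 * shmem (struct page)   false     sgt->pfn  + (curr >> PAGE_SHIFT)
 * device iomem          true      (sgt->dma + curr) >> PAGE_SHIFT
 */
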
@@ -552,7 +552,9 @@ __assign_mmap_offset(struct drm_file *file,
}
if (mmap_type != I915_MMAP_TYPE_GTT &&
- !i915_gem_object_has_struct_page(obj)) {
+ !i915_gem_object_type_has(obj,
+ I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_HAS_IOMEM)) {
err = -ENODEV;
goto out;
}
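
i915_gem_object_type_has() tests the object's ops->flags against the given mask, so passing both bits accepts an object backed by either struct pages or iomem. Roughly, the helper looks like this (a sketch based on the i915 code of this era, not part of the diff):

static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	/* true if the object type advertises ANY of the requested flags */
	return obj->ops->flags & flags;
}
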
@@ -2017,9 +2017,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
-int remap_io_sg_page(struct vm_area_struct *vma,
- unsigned long addr, unsigned long size,
- struct scatterlist *sgl);
+int remap_io_sg(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long size,
+ struct scatterlist *sgl, bool use_dma);
static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
@@ -35,6 +35,7 @@ struct remap_pfn {
pgprot_t prot;
struct sgt_iter sgt;
+ bool dma;
};
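
With the new member, the iterator state handed to the apply_to_page_range() callbacks becomes, in full (the mm and pfn members are pre-existing and simply outside this hunk's context):

struct remap_pfn {
	struct mm_struct *mm;	/* target address space */
	unsigned long pfn;	/* remap_pfn: next pfn; remap_sg: PTEs inserted */
	pgprot_t prot;
	struct sgt_iter sgt;
	bool dma;		/* walk DMA addresses instead of struct pages */
};
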
static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
@@ -48,12 +49,15 @@ static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
return 0;
}
-static inline unsigned long sgt_pfn(const struct sgt_iter *sgt)
+static inline unsigned long sgt_pfn(const struct sgt_iter *sgt, bool use_dma)
{
+ if (use_dma)
+ return (sgt->dma + sgt->curr) >> PAGE_SHIFT;
+
return sgt->pfn + (sgt->curr >> PAGE_SHIFT);
}
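
Both branches produce a page-frame number, but note the asymmetry: sgt->dma is a byte address, so the whole sum is shifted, while sgt->pfn is already a frame number, so only curr is shifted. A worked example with 4 KiB pages (values illustrative):

/*
 * PAGE_SHIFT == 12, sgt->curr == 0x3000 (third page of the entry):
 *
 *   use_dma,  sgt->dma == 0x100000000:
 *       (0x100000000 + 0x3000) >> 12  ==  0x100003
 *
 *   !use_dma, sgt->pfn == 0x1234:
 *       0x1234 + (0x3000 >> 12)       ==  0x1237
 */
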
-static int remap_sg_page(pte_t *pte, unsigned long addr, void *data)
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
{
struct remap_pfn *r = data;
@@ -62,12 +66,12 @@ static int remap_sg_page(pte_t *pte, unsigned long addr, void *data)
/* Special PTE are not associated with any struct page */
set_pte_at(r->mm, addr, pte,
- pte_mkspecial(pfn_pte(sgt_pfn(&r->sgt), r->prot)));
+ pte_mkspecial(pfn_pte(sgt_pfn(&r->sgt, r->dma), r->prot)));
r->pfn++; /* track insertions in case we need to unwind later */
r->sgt.curr += PAGE_SIZE;
if (r->sgt.curr >= r->sgt.max)
- r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), false);
+ r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), r->dma);
return 0;
}
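
remap_sg() leans on the driver's existing sg iterator: __sgt_iter(sgl, dma) seeds either the pfn or the dma member of the union, which is why sgt_pfn() must be told which one is live. For reference, struct sgt_iter is roughly this (from i915_scatterlist.h of this era):

struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;	/* seeded when dma == false */
		dma_addr_t dma;		/* seeded when dma == true */
	};
	unsigned int curr;		/* byte offset into the current entry */
	unsigned int max;		/* end of the current entry, in bytes */
};
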
@@ -108,30 +112,34 @@ int remap_io_mapping(struct vm_area_struct *vma,
}
/**
- * remap_io_sg_page - remap an IO mapping to userspace
+ * remap_io_sg - remap an IO mapping to userspace
* @vma: user vma to map to
* @addr: target user address to start at
* @size: size of map area
* @sgl: Start sg entry
+ * @use_dma: Use the stored DMA address instead of the pfn
*
* Note: this is only safe if the mm semaphore is held when called.
*/
-int remap_io_sg_page(struct vm_area_struct *vma,
- unsigned long addr, unsigned long size,
- struct scatterlist *sgl)
+int remap_io_sg(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long size,
+ struct scatterlist *sgl, bool use_dma)
{
struct remap_pfn r = {
.mm = vma->vm_mm,
.prot = vma->vm_page_prot,
- .sgt = __sgt_iter(sgl, false),
+ .sgt = __sgt_iter(sgl, use_dma),
+ .dma = use_dma,
};
int err;
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
- flush_cache_range(vma, addr, size);
- err = apply_to_page_range(r.mm, addr, size, remap_sg_page, &r);
+ if (!use_dma)
+ flush_cache_range(vma, addr, size);
+
+ err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
if (unlikely(err)) {
zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
return err;
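
On failure the helper unwinds only what it actually mapped: remap_sg() bumps r.pfn once per inserted PTE, so r.pfn << PAGE_SHIFT is the byte span already populated. For instance:

/*
 * If remap_sg() succeeded three times before apply_to_page_range()
 * failed (4 KiB pages), then r.pfn == 3 and the zap above amounts to
 *
 *	zap_vma_ptes(vma, addr, 3 << PAGE_SHIFT);	-- 0x3000 bytes
 *
 * i.e. exactly the three PTEs this call created are torn down.
 */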