@@ -1036,6 +1036,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return -EACCES;
}
+ if (node->readonly) {
+ if (vma->vm_flags & VM_WRITE) {
+ drm_gem_object_put_unlocked(obj);
+ return -EINVAL;
+ }
+
+ vma->vm_flags &= ~VM_MAYWRITE;
+ }
+
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
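This first hunk is in drivers/gpu/drm/drm_gem.c, so every GEM driver gets the check for free: asking for a writable mapping of a read-only node fails outright, and a permitted read-only mapping has VM_MAYWRITE stripped, which is what stops a later mprotect(PROT_WRITE) from quietly upgrading it. From userspace the behaviour looks roughly like this (a sketch; fd, offset and size stand in for an open DRM device, the object's fake mmap offset and its size):

	#include <stddef.h>
	#include <sys/mman.h>
	#include <sys/types.h>

	static void probe_readonly_bo(int fd, off_t offset, size_t size)
	{
		/* Refused by the new check in drm_gem_mmap(): EINVAL. */
		void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
				 MAP_SHARED, fd, offset);

		/* A read-only mapping still works ... */
		ptr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, offset);

		/* ... but with VM_MAYWRITE cleared it can never be upgraded:
		 * mprotect() fails with EACCES. */
		mprotect(ptr, size, PROT_READ | PROT_WRITE);
	}

The next hunk, in drivers/gpu/drm/i915/i915_gem.c, adds the matching check to the GTT fault handler: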
@@ -2009,6 +2009,10 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
unsigned int flags;
int ret;
+ /* Sanity check that we allow writing into this object */
+ if (i915_gem_object_is_readonly(obj) && write)
+ return VM_FAULT_SIGBUS;
+
/* We don't use vmf->pgoff since that has the fake offset */
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
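The mmap-time check covers new mappings; this one catches write faults on an existing GTT mmap and kills the offender with SIGBUS before a write can reach a read-only object. The next four hunks, all in drivers/gpu/drm/i915/i915_gem_gtt.c, convert the PTE paths from the old gt_ro bit to the new helper, starting with the per-process GTT: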
@@ -206,7 +206,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
/* Applicable to VLV, and gen8+ */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
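PTE_READ_ONLY is consumed when the PTE is actually encoded. For reference, the gen8 encoder from the same series does roughly the following (a simplified sketch; the cache-level switch is elided):

	static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
					  enum i915_cache_level level,
					  u32 flags)
	{
		gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

		if (unlikely(flags & PTE_READ_ONLY))
			pte &= ~_PAGE_RW;	/* GPU may read, never write */

		/* ... cache-level bits elided ... */
		return pte;
	}

The global GTT is different, as the next hunk documents: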
@@ -2423,8 +2423,10 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
dma_addr_t addr;
- /* The GTT does not support read-only mappings */
- GEM_BUG_ON(flags & PTE_READ_ONLY);
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read-only page.
+ */
gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
gtt_entries += vma->node.start >> PAGE_SHIFT;
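Dropping the GEM_BUG_ON() is deliberate: objects bound into the global GTT can now carry PTE_READ_ONLY, but gen8+ hardware has no read-only bit in the GGTT, so the flag is silently ignored there and protection of GGTT mmaps rests on the CPU-side checks added above. The two remaining bind paths get the same helper conversion: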
@@ -2663,7 +2665,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
pte_flags = 0;
- if (obj->gt_ro)
+ if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
intel_runtime_pm_get(i915);
@@ -2701,7 +2703,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
/* Currently applicable only to VLV */
pte_flags = 0;
- if (vma->obj->gt_ro)
+ if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
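With ppgtt_bind_vma(), ggtt_bind_vma() and aliasing_gtt_bind_vma() all funnelled through i915_gem_object_is_readonly(), there is a single source of truth for an object's GPU writability. That truth moves out of the driver-private struct and into the DRM core, which the next two hunks, in drivers/gpu/drm/i915/i915_gem_object.h, arrange: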
@@ -141,7 +141,6 @@ struct drm_i915_gem_object {
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
*/
- unsigned long gt_ro:1;
unsigned int cache_level:3;
unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
@@ -367,6 +366,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
reservation_object_unlock(obj->resv);
}
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+ obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+ return obj->base.vma_node.readonly;
+}
+
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
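Storing the flag in obj->base.vma_node is what lets drm_gem_mmap() in the DRM core test it without any driver callback. There is deliberately no clear helper: the flag is meant to be set once at object creation, before the object can be mapped or bound. The first user is intel_ring_create_vma() in drivers/gpu/drm/i915/intel_ringbuffer.c: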
@@ -1104,7 +1104,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
* if supported by the platform's GGTT.
*/
if (vm->has_read_only)
- obj->gt_ro = 1;
+ i915_gem_object_set_readonly(obj);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma))
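Ringbuffers are the natural first user: the CPU writes commands and the GPU only ever reads them, so on hardware that sets has_read_only a stray or hostile GPU write can no longer corrupt a ring. The igt_ctx_readonly() selftest in drivers/gpu/drm/i915/selftests/i915_gem_context.c is updated to match: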
@@ -476,7 +476,8 @@ static int igt_ctx_readonly(void *arg)
goto out_unlock;
}
- obj->gt_ro = prandom_u32_state(&prng);
+ if (prandom_u32_state(&prng) & 1)
+ i915_gem_object_set_readonly(obj);
}
intel_runtime_pm_get(i915);
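This also fixes a subtle wart in the old test: assigning prandom_u32_state() to the 1-bit gt_ro field silently kept only bit 0, so the intended 50/50 coin flip worked by accident. The new form makes it explicit. Later in the test, read-only objects must show no CPU-visible writes at all: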
@@ -507,7 +508,7 @@ static int igt_ctx_readonly(void *arg)
unsigned int num_writes;
num_writes = rem;
- if (obj->gt_ro)
+ if (i915_gem_object_is_readonly(obj))
num_writes = 0;
err = cpu_check(obj, num_writes);
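Finally, the bit itself, in include/drm/drm_vma_manager.h: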
@@ -41,6 +41,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
+ bool readonly:1;
};
struct drm_vma_offset_manager {
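Because the flag sits in the core drm_vma_offset_node, any other GEM driver can opt in without further plumbing; a driver-side setter would simply mirror the i915 helper above (a hypothetical example, with foo as a stand-in driver prefix):

	/* Hypothetical: mark a GEM object's mmap node read-only before it
	 * is ever mapped; drm_gem_mmap() then enforces it on our behalf. */
	static inline void foo_gem_object_set_readonly(struct drm_gem_object *obj)
	{
		obj->vma_node.readonly = true;
	}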