@@ -3144,6 +3144,7 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
* @obj - the object to map into kernel address space
+ * @use_wc - whether the mapping should use a WC (write-combining) or WB pgprot_t
*
* Calls i915_gem_object_pin_pages() to prevent reaping of the object's
* pages and then returns a contiguous mapping of the backing storage into
@@ -3155,7 +3156,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
* Returns the pointer through which to access the mapped object, or an
* ERR_PTR() on error.
+ *
+ * Note that the mapping type is fixed while the pages are also pinned by
+ * another user: requesting a different @use_wc in that case returns
+ * ERR_PTR(-EBUSY).
*/
-void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
+void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
+ bool use_wc);
/**
* i915_gem_object_unpin_map - releases an earlier mapping
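For orientation, a minimal usage sketch of the new parameter (the caller, its
buffers, and its error path are hypothetical; only pin_map/unpin_map come from
this interface, and struct_mutex must be held as the lockdep assert below
demands):

	/* Map the whole object write-combined for a CPU upload. */
	vaddr = i915_gem_object_pin_map(obj, true);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, data, size);	/* WC writes combine; no clflush needed */

	i915_gem_object_unpin_map(obj);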
@@ -2225,10 +2225,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
list_del(&obj->global_list);
if (obj->mapping) {
- if (is_vmalloc_addr(obj->mapping))
- vunmap(obj->mapping);
+ /* Bit 0 of obj->mapping holds the WC flag; mask it off to
+  * recover the real kmap/vmap address before releasing it.
+  */
+ void *ptr = (void *)((uintptr_t)obj->mapping & ~1);
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
else
- kunmap(kmap_to_page(obj->mapping));
+ kunmap(kmap_to_page(ptr));
obj->mapping = NULL;
}
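The low bit of obj->mapping now encodes the mapping type, which is safe
because both kmap() and vmap() return page-aligned addresses. The open-coded
masking above amounts to this pair of helpers (names are illustrative, not
part of the patch):

	/* Illustrative only: the patch open-codes these expressions. */
	static inline void *map_pack(void *ptr, bool use_wc)
	{
		return (void *)((uintptr_t)ptr | use_wc);	/* bit 0 = WC */
	}

	static inline void *map_unpack(void *packed, bool *use_wc)
	{
		*use_wc = (uintptr_t)packed & 1;
		return (void *)((uintptr_t)packed & ~1);
	}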
@@ -2401,7 +2402,8 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+ bool use_wc)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
struct sg_table *sgt = obj->pages;
@@ -2413,7 +2415,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
void *addr;
- /* A single page can always be kmapped */
- if (n_pages == 1)
+ /* A single page can always be kmapped, but only with the default
+  * WB pgprot: kmap() cannot provide a WC mapping.
+  */
+ if (n_pages == 1 && !use_wc)
return kmap(sg_page(sgt->sgl));
if (n_pages > ARRAY_SIZE(stack_pages)) {
@@ -2429,7 +2431,8 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
/* Check that we have the expected number of pages */
GEM_BUG_ON(i != n_pages);
- addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+ addr = vmap(pages, n_pages, VM_NO_GUARD,
+ use_wc ? pgprot_writecombine(PAGE_KERNEL_IO) : PAGE_KERNEL);
if (pages != stack_pages)
drm_free_large(pages);
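The hunks above elide the loop that gathers the struct page pointers out of
the object's sg_table. Pieced together from the visible context, the helper
now reads roughly as follows (a condensed sketch, not a verbatim copy of the
driver source):

	static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
					 bool use_wc)
	{
		unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt = obj->pages;
		struct sg_page_iter sg_iter;
		struct page *stack_pages[32];
		struct page **pages = stack_pages;
		unsigned long i = 0;
		void *addr;

		/* A single page can be kmapped, but only with a WB pgprot */
		if (n_pages == 1 && !use_wc)
			return kmap(sg_page(sgt->sgl));

		if (n_pages > ARRAY_SIZE(stack_pages)) {
			pages = drm_malloc_gfp(n_pages, sizeof(*pages),
					       GFP_TEMPORARY);
			if (!pages)
				return NULL;
		}

		for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0)
			pages[i++] = sg_page_iter_page(&sg_iter);

		/* Check that we have the expected number of pages */
		GEM_BUG_ON(i != n_pages);

		addr = vmap(pages, n_pages, VM_NO_GUARD,
			    use_wc ? pgprot_writecombine(PAGE_KERNEL_IO) :
				     PAGE_KERNEL);

		if (pages != stack_pages)
			drm_free_large(pages);

		return addr;
	}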
@@ -2438,27 +2441,55 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
}
/* get, pin, and map the pages of the object into kernel space */
-void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
+void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, bool use_wc)
{
+ void *ptr;
+ bool has_wc;
+ bool pinned;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
+ GEM_BUG_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0);
ret = i915_gem_object_get_pages(obj);
if (ret)
return ERR_PTR(ret);
+ GEM_BUG_ON(obj->pages == NULL);
i915_gem_object_pin_pages(obj);
- if (!obj->mapping) {
- obj->mapping = i915_gem_object_map(obj);
- if (!obj->mapping) {
- i915_gem_object_unpin_pages(obj);
- return ERR_PTR(-ENOMEM);
+ /* pages_pin_count includes the pin taken just above, so a count
+  * greater than one means another user may be relying on the
+  * current mapping.
+  */
+ pinned = (obj->pages_pin_count > 1);
+
+ /* obj->mapping carries the mapping type (WC?) in bit 0 of the pointer */
+ ptr = (void *)((uintptr_t)obj->mapping & ~1);
+ has_wc = (uintptr_t)obj->mapping & 1;
+
+ if (ptr && has_wc != use_wc) {
+ if (pinned) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if (is_vmalloc_addr(ptr))
+ vunmap(ptr);
+ else
+ kunmap(kmap_to_page(ptr));
+ ptr = obj->mapping = NULL;
+ }
+
+ if (!ptr) {
+ ptr = i915_gem_object_map(obj, use_wc);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto err;
}
+
+ obj->mapping = (void *)((uintptr_t)ptr | use_wc);
}
- return obj->mapping;
+ return ptr;
+
+err:
+ i915_gem_object_unpin_pages(obj);
+ return ERR_PTR(ret);
}
void i915_vma_move_to_active(struct i915_vma *vma,
@@ -115,7 +115,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
if (ret)
return ERR_PTR(ret);
- addr = i915_gem_object_pin_map(obj);
+ addr = i915_gem_object_pin_map(obj, false);
mutex_unlock(&dev->struct_mutex);
return addr;
@@ -952,7 +952,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
if (ret)
goto err;
- vaddr = i915_gem_object_pin_map(ce->state);
+ vaddr = i915_gem_object_pin_map(ce->state, false);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto unpin_ctx_obj;
@@ -1939,7 +1939,7 @@ lrc_setup_hws(struct intel_engine_cs *engine,
/* The HWSP is part of the default context object in LRC mode. */
engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) +
LRC_PPHWSP_PN * PAGE_SIZE;
- hws = i915_gem_object_pin_map(dctx_obj);
+ hws = i915_gem_object_pin_map(dctx_obj, false);
if (IS_ERR(hws))
return PTR_ERR(hws);
engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE;
@@ -2291,7 +2291,7 @@ populate_lr_context(struct i915_gem_context *ctx,
return ret;
}
- vaddr = i915_gem_object_pin_map(ctx_obj);
+ vaddr = i915_gem_object_pin_map(ctx_obj, false);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
@@ -2525,7 +2525,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
if (!ctx_obj)
continue;
- vaddr = i915_gem_object_pin_map(ctx_obj);
+ vaddr = i915_gem_object_pin_map(ctx_obj, false);
if (WARN_ON(IS_ERR(vaddr)))
continue;
@@ -2131,7 +2131,7 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
if (ret)
goto err_unpin;
- addr = i915_gem_object_pin_map(obj);
+ addr = i915_gem_object_pin_map(obj, false);
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err_unpin;
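Note that every existing caller is converted with use_wc == false, so the
patch is behaviour-preserving on its own: all current users keep their WB
mappings, and the WC path only becomes reachable once a caller opts in.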