@@ -680,7 +680,8 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret)
goto err;

- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map(obj,
+ i915_coherent_map_type(engine->i915, obj, true));
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
goto err_unpin;
@@ -24,7 +24,7 @@ static void dbg_poison_ce(struct intel_context *ce)
if (ce->state) {
struct drm_i915_gem_object *obj = ce->state->obj;
- int type = i915_coherent_map_type(ce->engine->i915);
+ int type = i915_coherent_map_type(ce->engine->i915, obj, true);
void *map;

if (!i915_gem_object_trylock(ce->state->obj))
@@ -3535,8 +3535,8 @@ __execlists_context_pre_pin(struct intel_context *ce,
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));

*vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ce->engine->i915) |
- I915_MAP_OVERRIDE);
+ i915_coherent_map_type(ce->engine->i915, ce->state->obj, false) |
+ I915_MAP_OVERRIDE);

if (IS_ERR(*vaddr))
return PTR_ERR(*vaddr);
@@ -51,9 +51,12 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
- else
- addr = i915_gem_object_pin_map(vma->obj,
- i915_coherent_map_type(vma->vm->i915));
+ else {
+ int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+
+ addr = i915_gem_object_pin_map(vma->obj, type);
+ }
+
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err_ring;
@@ -31,6 +31,7 @@ static int __hwsp_alloc(struct intel_gt *gt, struct intel_timeline_hwsp *hwsp)
{
struct drm_i915_private *i915 = gt->i915;
struct drm_i915_gem_object *obj;
+ int type;
int ret;

obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
@@ -47,7 +48,8 @@ static int __hwsp_alloc(struct intel_gt *gt, struct intel_timeline_hwsp *hwsp)
}

/* Pin early so we can call i915_ggtt_pin_unlocked(). */
- hwsp->vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ type = i915_coherent_map_type(i915, obj, true);
+ hwsp->vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(hwsp->vaddr)) {
ret = PTR_ERR(hwsp->vaddr);
goto out_unlock;
@@ -235,9 +237,11 @@ intel_timeline_pin_map(struct intel_timeline *timeline)
if (!timeline->hwsp_cacheline) {
struct drm_i915_gem_object *obj = timeline->hwsp_ggtt->obj;
u32 ofs = offset_in_page(timeline->hwsp_offset);
+ int type;
void *vaddr;

- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ type = i915_coherent_map_type(timeline->gt->i915, obj, true);
+ vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -89,7 +89,8 @@ static int __live_context_size(struct intel_engine_cs *engine)
goto err;

vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915));
+ i915_coherent_map_type(engine->i915,
+ ce->state->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
intel_context_unpin(ce);
@@ -5854,7 +5854,8 @@ static int compare_isolation(struct intel_engine_cs *engine,
}

lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915));
+ i915_coherent_map_type(engine->i915,
+ ce->state->obj, true));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
goto err_B1;
@@ -88,7 +88,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);

vaddr = i915_gem_object_pin_map_unlocked(h->obj,
- i915_coherent_map_type(gt->i915));
+ i915_coherent_map_type(gt->i915, h->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -149,7 +149,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
}

- vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915));
+ vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
i915_vm_put(vm);
@@ -694,7 +694,9 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
if (IS_ERR(vma))
return PTR_ERR(vma);

- vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(guc_to_gt(guc)->i915,
+ vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
@@ -82,7 +82,9 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
if (IS_ERR(vma))
return PTR_ERR(vma);

- vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(gt->i915,
+ vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
@@ -78,6 +78,7 @@
#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
@@ -2027,9 +2028,15 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
}

static inline enum i915_map_type
-i915_coherent_map_type(struct drm_i915_private *i915)
+i915_coherent_map_type(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj, bool always_coherent)
{
- return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+ if (i915_gem_object_is_lmem(obj))
+ return I915_MAP_WC;
+ if (HAS_LLC(i915) || always_coherent)
+ return I915_MAP_WB;
+ else
+ return I915_MAP_WC;
}

static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val)
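For reference, the behaviour the reworked helper implements: lmem objects must be mapped WC because device-local memory is not CPU-cacheable; smem objects get WB either on LLC platforms (where it is coherent anyway) or when the caller demands coherence, and WC otherwise. Below is a standalone sketch of that decision table. It is illustrative only and not part of the patch: map_type, coherent_map_type and the three bools are stand-ins for enum i915_map_type, i915_coherent_map_type(), i915_gem_object_is_lmem() and HAS_LLC().

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's enum i915_map_type. */
enum map_type { MAP_WB, MAP_WC };

/*
 * Mirrors the logic of i915_coherent_map_type() above, with
 * i915_gem_object_is_lmem() and HAS_LLC() modelled as plain bools.
 */
static enum map_type coherent_map_type(bool is_lmem, bool has_llc,
				       bool always_coherent)
{
	if (is_lmem)
		return MAP_WC;	/* device-local memory is never CPU-cached */
	if (has_llc || always_coherent)
		return MAP_WB;
	return MAP_WC;
}

int main(void)
{
	int i;

	/* Walk all eight input combinations and print the resulting mapping. */
	for (i = 0; i < 8; i++) {
		bool lmem = i & 4, llc = i & 2, coherent = i & 1;

		printf("lmem=%d llc=%d always_coherent=%d -> %s\n",
		       lmem, llc, coherent,
		       coherent_map_type(lmem, llc, coherent) == MAP_WB ?
		       "WB" : "WC");
	}

	return 0;
}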
@@ -93,9 +93,9 @@ int igt_spinner_pin(struct igt_spinner *spin,
}

if (!spin->batch) {
- unsigned int mode =
- i915_coherent_map_type(spin->gt->i915);
+ unsigned int mode;

+ mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
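Broadly, the call sites above follow one convention (my reading of the patch, not stated in it): objects the CPU keeps reading back, such as the status page, HWSP/timeline pages and the GuC/HuC staging buffers, pass always_coherent = true so they stay WB even on non-LLC parts, while buffers that are mostly written once by the CPU for the GPU to consume, such as rings and batch buffers, pass false and tolerate a WC mapping there. The two resulting idioms:

/* CPU polls or reads this object back: prefer a coherent WB mapping. */
vaddr = i915_gem_object_pin_map(obj,
				i915_coherent_map_type(i915, obj, true));

/* CPU writes once, GPU consumes: WC on non-LLC platforms is acceptable. */
vaddr = i915_gem_object_pin_map(obj,
				i915_coherent_map_type(i915, obj, false));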