@@ -23,7 +23,7 @@ static void dbg_poison_ce(struct intel_context *ce)

if (ce->state) {
struct drm_i915_gem_object *obj = ce->state->obj;
- int type = i915_coherent_map_type(ce->engine->i915);
+ int type = i915_coherent_map_type(ce->engine->i915, obj, true);
void *map;

if (!i915_gem_object_trylock(obj))
@@ -903,7 +903,9 @@ lrc_pre_pin(struct intel_context *ce,
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));

*vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ce->engine->i915) |
+ i915_coherent_map_type(ce->engine->i915,
+ ce->state->obj,
+ false) |
I915_MAP_OVERRIDE);

return PTR_ERR_OR_ZERO(*vaddr);
@@ -53,9 +53,12 @@ int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)

if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
- else
- addr = i915_gem_object_pin_map(vma->obj,
- i915_coherent_map_type(vma->vm->i915));
+ else {
+ int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);
+
+ addr = i915_gem_object_pin_map(vma->obj, type);
+ }
+
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err_ring;
@@ -88,7 +88,8 @@ static int __live_context_size(struct intel_engine_cs *engine)
goto err;

vaddr = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915));
+ i915_coherent_map_type(engine->i915,
+ ce->state->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
intel_context_unpin(ce);
@@ -69,7 +69,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);

vaddr = i915_gem_object_pin_map_unlocked(h->obj,
- i915_coherent_map_type(gt->i915));
+ i915_coherent_map_type(gt->i915, h->obj, false));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -130,7 +130,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
}

- vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915));
+ vaddr = i915_gem_object_pin_map_unlocked(obj, i915_coherent_map_type(gt->i915, obj, false));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
i915_vm_put(vm);
@@ -1221,7 +1221,9 @@ static int compare_isolation(struct intel_engine_cs *engine,
}

lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
- i915_coherent_map_type(engine->i915));
+ i915_coherent_map_type(engine->i915,
+ ce->state->obj,
+ false));
if (IS_ERR(lrc)) {
err = PTR_ERR(lrc);
goto err_B1;
@@ -682,7 +682,9 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
if (IS_ERR(vma))
return PTR_ERR(vma);

- vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(guc_to_gt(guc)->i915,
+ vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
@@ -82,7 +82,9 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
if (IS_ERR(vma))
return PTR_ERR(vma);

- vaddr = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
+ i915_coherent_map_type(gt->i915,
+ vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
return PTR_ERR(vaddr);
@@ -78,6 +78,7 @@
#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
@@ -1936,9 +1937,15 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
}

static inline enum i915_map_type
-i915_coherent_map_type(struct drm_i915_private *i915)
+i915_coherent_map_type(struct drm_i915_private *i915,
+ struct drm_i915_gem_object *obj, bool always_coherent)
{
- return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
+ if (i915_gem_object_is_lmem(obj))
+ return I915_MAP_WC;
+ if (HAS_LLC(i915) || always_coherent)
+ return I915_MAP_WB;
+ else
+ return I915_MAP_WC;
}

#endif
@@ -94,9 +94,9 @@ int igt_spinner_pin(struct igt_spinner *spin,
}

if (!spin->batch) {
- unsigned int mode =
- i915_coherent_map_type(spin->gt->i915);
+ unsigned int mode;

+ mode = i915_coherent_map_type(spin->gt->i915, spin->obj, false);
vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);