@@ -2850,13 +2850,15 @@ static int init_ggtt(struct i915_ggtt *ggtt)
if (ret)
return ret;
- /* Reserve a mappable slot for our lockless error capture */
- ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
- PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
- 0, ggtt->mappable_end,
- DRM_MM_INSERT_LOW);
- if (ret)
- return ret;
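+ /*
+ * Without a CPU-mappable aperture there is no GGTT range we could
+ * read the capture page back through, so only reserve the slot when
+ * the aperture exists; capture then reads the pages directly.
+ */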
+ if (HAS_MAPPABLE_APERTURE(ggtt->vm.i915)) {
+ /* Reserve a mappable slot for our lockless error capture */
+ ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ if (ret)
+ return ret;
+ }
/*
* The upper portion of the GuC address space has a sizeable hole
@@ -218,7 +218,7 @@ struct compress {
void *tmp;
};
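+ /* @wc: pages passed to compress_page() sit behind a WC mapping */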
-static bool compress_init(struct compress *c)
+static bool compress_init(struct compress *c, bool wc)
{
struct z_stream_s *zstream = memset(&c->zstream, 0, sizeof(c->zstream));
@@ -234,7 +234,7 @@ static bool compress_init(struct compress *c)
}
c->tmp = NULL;
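+ /*
+ * A cached staging page only pays off for WC sources, where
+ * i915_memcpy_from_wc() pulls the data into cache once instead of
+ * letting zlib issue repeated uncached reads.
+ */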
- if (i915_has_memcpy_from_wc())
+ if (wc && i915_has_memcpy_from_wc())
c->tmp = (void *)__get_free_page(GFP_ATOMIC | __GFP_NOWARN);
return true;
@@ -335,10 +335,12 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m)
#else
struct compress {
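+ /* set when compress_page() sources are WC-mapped */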
+ bool wc;
};
-static bool compress_init(struct compress *c)
+static bool compress_init(struct compress *c, bool wc)
{
+ c->wc = wc;
return true;
}
@@ -354,7 +356,7 @@ static int compress_page(struct compress *c,
return -ENOMEM;
ptr = (void *)page;
- if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
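+ /* the non-temporal read only helps for a WC-mapped source */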
+ if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE)))
memcpy(ptr, src, PAGE_SIZE);
dst->pages[dst->page_count++] = ptr;
@@ -998,7 +1000,6 @@ i915_error_object_create(struct drm_i915_private *i915,
struct compress compress;
unsigned long num_pages;
struct sgt_iter iter;
- dma_addr_t dma;
int ret;
if (!vma || !vma->pages)
@@ -1017,22 +1018,52 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->page_count = 0;
dst->unused = 0;
- if (!compress_init(&compress)) {
+ if (!compress_init(&compress, drm_mm_node_allocated(&ggtt->error_capture))) {
kfree(dst);
return NULL;
}
ret = -EINVAL;
- for_each_sgt_dma(dma, iter, vma->pages) {
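+ /*
+ * Preferred path: bind each page into the reserved GGTT slot and
+ * read it back through the mappable aperture.
+ */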
+ if (drm_mm_node_allocated(&ggtt->error_capture)) {
void __iomem *s;
+ dma_addr_t dma;
- ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+ for_each_sgt_dma(dma, iter, vma->pages) {
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot,
+ I915_CACHE_NONE, 0);
- s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
- ret = compress_page(&compress, (void __force *)s, dst);
- io_mapping_unmap_atomic(s);
- if (ret)
- break;
+ s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
+ ret = compress_page(&compress, (void __force *)s, dst);
+ io_mapping_unmap_atomic(s);
+
+ if (ret)
+ break;
+ }
+ } else if (i915_gem_object_is_lmem(vma->obj)) {
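+ /* no GGTT slot: read lmem pages directly via the region's iomap */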
+ void __iomem *s;
+ dma_addr_t dma;
+ struct intel_memory_region *mem = vma->obj->memory_region;
+
+ for_each_sgt_dma(dma, iter, vma->pages) {
+ s = io_mapping_map_atomic_wc(&mem->iomap, dma);
+ ret = compress_page(&compress, (void __force *)s, dst);
+ io_mapping_unmap_atomic(s);
+
+ if (ret)
+ break;
+ }
+ } else {
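+ /* plain system memory: kmap each page and copy from there */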
+ void *s;
+ struct page *page;
+
+ for_each_sgt_page(page, iter, vma->pages) {
+ s = kmap_atomic(page);
+ ret = compress_page(&compress, s, dst);
+ kunmap_atomic(s);
+
+ if (ret)
+ break;
+ }
}
if (ret || compress_flush(&compress, dst)) {
@@ -1745,9 +1776,11 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
static void capture_finish(struct i915_gpu_state *error)
{
struct i915_ggtt *ggtt = &error->i915->ggtt;
- const u64 slot = ggtt->error_capture.start;
- ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
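+ /* the slot is only reserved when a mappable aperture exists */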
+ if (drm_mm_node_allocated(&ggtt->error_capture)) {
+ const u64 slot = ggtt->error_capture.start;
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+ }
}
static int capture(void *data)