@@ -1740,7 +1740,6 @@ static int read_from_scratch(struct i915_gem_context *ctx,
static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
{
struct i915_address_space *vm;
- struct page *page;
u32 *vaddr;
int err = 0;
@@ -1748,24 +1747,18 @@ static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
if (!vm)
return -ENODEV;
- page = __px_page(vm->scratch[0]);
- if (!page) {
+ if (!vm->scratch[0]) {
pr_err("No scratch page!\n");
return -EINVAL;
}
- vaddr = kmap(page);
- if (!vaddr) {
- pr_err("No (mappable) scratch page!\n");
- return -EINVAL;
- }
+ vaddr = __px_vaddr(vm->scratch[0]);
memcpy(out, vaddr, sizeof(*out));
if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
pr_err("Inconsistent initial state of scratch page!\n");
err = -EINVAL;
}
- kunmap(page);
return err;
}
@@ -105,9 +105,8 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
* entries back to scratch.
*/
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
memset32(vaddr + pte, scratch_pte, count);
- kunmap_atomic(vaddr);
pte = 0;
}
@@ -129,7 +128,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
GEM_BUG_ON(!pd->entry[act_pt]);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
+ vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
do {
GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
@@ -145,12 +144,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
}
if (++act_pte == GEN6_PTES) {
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
+ vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
act_pte = 0;
}
} while (1);
- kunmap_atomic(vaddr);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
@@ -244,7 +241,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
goto err_scratch0;
}
- ret = pin_pt_dma(vm, vm->scratch[1]);
+ ret = map_pt_dma(vm, vm->scratch[1]);
if (ret)
goto err_scratch1;
@@ -237,11 +237,10 @@ static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
atomic_read(&pt->used));
GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
memset64(vaddr + gen8_pd_index(start, 0),
vm->scratch[0]->encode,
count);
- kunmap_atomic(vaddr);
atomic_sub(count, &pt->used);
start += count;
@@ -370,7 +369,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
gen8_pte_t *vaddr;
pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
do {
GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma;
@@ -397,12 +396,10 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
}
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
- vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+ vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
}
} while (1);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
return idx;
}
@@ -437,7 +434,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
encode |= GEN8_PDE_PS_2M;
page_size = I915_GTT_PAGE_SIZE_2M;
- vaddr = kmap_atomic_px(pd);
+ vaddr = px_vaddr(pd);
} else {
struct i915_page_table *pt =
i915_pt_entry(pd, __gen8_pte_index(start, 1));
@@ -452,7 +449,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
maybe_64K = __gen8_pte_index(start, 1);
- vaddr = kmap_atomic_px(pt);
+ vaddr = px_vaddr(pt);
}
do {
@@ -486,7 +483,6 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
} while (rem >= page_size && index < I915_PDES);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap_atomic(vaddr);
/*
* Is it safe to mark the 2M block as 64K? -- Either we have
@@ -500,9 +496,8 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
!iter->sg && IS_ALIGNED(vma->node.start +
vma->node.size,
I915_GTT_PAGE_SIZE_2M)))) {
- vaddr = kmap_atomic_px(pd);
+ vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
- kunmap_atomic(vaddr);
page_size = I915_GTT_PAGE_SIZE_64K;
/*
@@ -518,12 +513,11 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
u16 i;
encode = vma->vm->scratch[0]->encode;
- vaddr = kmap_atomic_px(i915_pt_entry(pd, maybe_64K));
+ vaddr = px_vaddr(i915_pt_entry(pd, maybe_64K));
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
- kunmap_atomic(vaddr);
}
}
@@ -592,7 +586,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
if (IS_ERR(obj))
goto free_scratch;
- ret = pin_pt_dma(vm, obj);
+ ret = map_pt_dma(vm, obj);
if (ret) {
i915_gem_object_put(obj);
goto free_scratch;
@@ -629,7 +623,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
if (IS_ERR(pde))
return PTR_ERR(pde);
- err = pin_pt_dma(vm, pde->pt.base);
+ err = map_pt_dma(vm, pde->pt.base);
if (err) {
i915_gem_object_put(pde->pt.base);
free_pd(vm, pde);
@@ -665,7 +659,7 @@ gen8_alloc_top_pd(struct i915_address_space *vm)
goto err_pd;
}
- err = pin_pt_dma(vm, pd->pt.base);
+ err = map_pt_dma(vm, pd->pt.base);
if (err)
goto err_pd;
@@ -616,7 +616,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
goto err_ppgtt;
i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
i915_gem_object_unlock(ppgtt->vm.scratch[0]);
if (err)
goto err_stash;
@@ -25,27 +25,25 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
return obj;
}
-int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- int err;
+ void *vaddr;
- i915_gem_object_lock(obj, NULL);
- err = i915_gem_object_pin_pages(obj);
- i915_gem_object_unlock(obj);
- if (err)
- return err;
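+ /* Map (and pin) the backing store for the lifetime of the page-table object */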
+ vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
}
-int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
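+/* As map_pt_dma(), but for callers that already hold the object lock */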
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
{
- int err;
+ void *vaddr;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
i915_gem_object_make_unshrinkable(obj);
return 0;
@@ -155,6 +153,14 @@ void clear_pages(struct i915_vma *vma)
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
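+/*
+ * Page table backing store is kept mapped by map_pt_dma() for the lifetime
+ * of the object, so the kernel virtual address can be read straight back
+ * out of obj->mm.mapping (which packs the i915_map_type into its low bits).
+ */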
+void *__px_vaddr(struct drm_i915_gem_object *p)
+{
+ enum i915_map_type type;
+
+ GEM_BUG_ON(!i915_gem_object_has_pages(p));
+ return page_unpack_bits(p->mm.mapping, &type);
+}
+
dma_addr_t __px_dma(struct drm_i915_gem_object *p)
{
GEM_BUG_ON(!i915_gem_object_has_pages(p));
@@ -170,32 +176,22 @@ struct page *__px_page(struct drm_i915_gem_object *p)
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count)
{
- struct page *page = __px_page(p);
- void *vaddr;
+ void *vaddr = __px_vaddr(p);
- vaddr = kmap(page);
memset64(vaddr, val, count);
clflush_cache_range(vaddr, PAGE_SIZE);
- kunmap(page);
}
static void poison_scratch_page(struct drm_i915_gem_object *scratch)
{
- struct sgt_iter sgt;
- struct page *page;
+ void *vaddr = __px_vaddr(scratch);
u8 val;
val = 0;
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
val = POISON_FREE;
- for_each_sgt_page(page, sgt, scratch->mm.pages) {
- void *vaddr;
-
- vaddr = kmap(page);
- memset(vaddr, val, PAGE_SIZE);
- kunmap(page);
- }
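+ /* The scratch object is mapped contiguously, so poison it in one pass */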
+ memset(vaddr, val, scratch->base.size);
}
int setup_scratch_page(struct i915_address_space *vm)
@@ -225,7 +221,7 @@ int setup_scratch_page(struct i915_address_space *vm)
if (IS_ERR(obj))
goto skip;
- if (pin_pt_dma(vm, obj))
+ if (map_pt_dma(vm, obj))
goto skip_obj;
/* We need a single contiguous page for our scratch */
@@ -176,6 +176,9 @@ struct page *__px_page(struct drm_i915_gem_object *p);
dma_addr_t __px_dma(struct drm_i915_gem_object *p);
#define px_dma(px) (__px_dma(px_base(px)))
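+/* Kernel virtual address of a page table/directory backing object */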
+void *__px_vaddr(struct drm_i915_gem_object *p);
+#define px_vaddr(px) (__px_vaddr(px_base(px)))
+
#define px_pt(px) \
__px_choose_expr(px, struct i915_page_table *, __x, \
__px_choose_expr(px, struct i915_page_directory *, &__x->pt, \
@@ -506,8 +509,6 @@ struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
-#define kmap_atomic_px(px) kmap_atomic(__px_page(px_base(px)))
-
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);
@@ -525,8 +526,8 @@ struct i915_page_table *alloc_pt(struct i915_address_space *vm);
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
struct i915_page_directory *__alloc_pd(int npde);
-int pin_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
-int pin_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
+int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
void free_px(struct i915_address_space *vm,
struct i915_page_table *pt, int lvl);
@@ -573,7 +574,7 @@ void setup_private_pat(struct intel_uncore *uncore);
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
u64 size);
-int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash);
@@ -85,11 +85,10 @@ write_dma_entry(struct drm_i915_gem_object * const pdma,
const unsigned short idx,
const u64 encoded_entry)
{
- u64 * const vaddr = kmap_atomic(__px_page(pdma));
+ u64 * const vaddr = __px_vaddr(pdma);
vaddr[idx] = encoded_entry;
clflush_cache_range(&vaddr[idx], sizeof(u64));
- kunmap_atomic(vaddr);
}
void
@@ -254,7 +253,7 @@ int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
return 0;
}
-int i915_vm_pin_pt_stash(struct i915_address_space *vm,
+int i915_vm_map_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash)
{
struct i915_page_table *pt;
@@ -262,7 +261,7 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
for (pt = stash->pt[n]; pt; pt = pt->stash) {
- err = pin_pt_dma_locked(vm, pt->base);
+ err = map_pt_dma_locked(vm, pt->base);
if (err)
return err;
}
@@ -914,8 +914,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
goto err_fence;
- err = i915_vm_pin_pt_stash(vma->vm,
- &work->stash);
+ err = i915_vm_map_pt_stash(vma->vm, &work->stash);
if (err)
goto err_fence;
}
@@ -185,7 +185,7 @@ static int igt_ppgtt_alloc(void *arg)
if (err)
goto err_ppgtt_cleanup;
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
if (err) {
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
goto err_ppgtt_cleanup;
@@ -207,7 +207,7 @@ static int igt_ppgtt_alloc(void *arg)
if (err)
goto err_ppgtt_cleanup;
- err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
+ err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
if (err) {
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
goto err_ppgtt_cleanup;
@@ -324,11 +324,10 @@ static int lowlevel_hole(struct i915_address_space *vm,
BIT_ULL(size)))
goto alloc_vm_end;
- err = i915_vm_pin_pt_stash(vm, &stash);
+ err = i915_vm_map_pt_stash(vm, &stash);
if (!err)
vm->allocate_va_range(vm, &stash,
addr, BIT_ULL(size));
-
i915_vm_free_pt_stash(vm, &stash);
alloc_vm_end:
if (err == -EDEADLK) {
@@ -1966,10 +1965,9 @@ static int igt_cs_tlb(void *arg)
if (err)
goto end_ww;
- err = i915_vm_pin_pt_stash(vm, &stash);
+ err = i915_vm_map_pt_stash(vm, &stash);
if (!err)
vm->allocate_va_range(vm, &stash, offset, chunk_size);
-
i915_vm_free_pt_stash(vm, &stash);
end_ww:
if (err == -EDEADLK) {
@@ -307,7 +307,7 @@ static int live_noa_gpr(void *arg)
}
/* Poison the ce->vm so we detect writes not to the GGTT gt->scratch */
- scratch = kmap(__px_page(ce->vm->scratch[0]));
+ scratch = __px_vaddr(ce->vm->scratch[0]);
memset(scratch, POISON_FREE, PAGE_SIZE);
rq = intel_context_create_request(ce);
@@ -405,7 +405,6 @@ static int live_noa_gpr(void *arg)
out_rq:
i915_request_put(rq);
out_ce:
- kunmap(__px_page(ce->vm->scratch[0]));
intel_context_put(ce);
out:
stream_destroy(stream);