@@ -405,9 +405,12 @@ static void __pdp_fini(struct i915_pagedirpo *pdp)
static void free_pdp_single(struct i915_pagedirpo *pdp,
struct drm_device *dev)
{
- __pdp_fini(pdp);
- if (HAS_48B_PPGTT(dev))
+ if (HAS_48B_PPGTT(dev)) {
+ __pdp_fini(pdp);
+ i915_dma_unmap_single(pdp, dev);
+ __free_page(pdp->page);
kfree(pdp);
+ }
}
static int __pdp_init(struct i915_pagedirpo *pdp,
@@ -433,6 +436,60 @@ static int __pdp_init(struct i915_pagedirpo *pdp,
return 0;
}
+static struct i915_pagedirpo *alloc_pdp_single(struct i915_hw_ppgtt *ppgtt,
+ struct i915_pml4 *pml4)
+{
+ struct drm_device *dev = ppgtt->base.dev;
+ struct i915_pagedirpo *pdp;
+ int ret;
+
+ BUG_ON(!HAS_48B_PPGTT(dev));
+
+ pdp = kmalloc(sizeof(*pdp), GFP_KERNEL);
+ if (!pdp)
+ return ERR_PTR(-ENOMEM);
+
+ pdp->page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (!pdp->page) {
+ kfree(pdp);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = __pdp_init(pdp, dev);
+ if (ret) {
+ __free_page(pdp->page);
+ kfree(pdp);
+ return ERR_PTR(ret);
+ }
+
+ i915_dma_map_px_single(pdp, dev);
+
+ return pdp;
+}
+
+static void pml4_fini(struct i915_pml4 *pml4)
+{
+	struct i915_hw_ppgtt *ppgtt = container_of(pml4, struct i915_hw_ppgtt, pml4);
+	if (!pml4->page)
+		return; /* already torn down */
+	i915_dma_unmap_single(pml4, ppgtt->base.dev);
+	__free_page(pml4->page);
+	pml4->page = NULL; /* mark as freed so a repeat call is a no-op */
+}
+
+static int pml4_init(struct i915_hw_ppgtt *ppgtt)
+{
+ struct i915_pml4 *pml4 = &ppgtt->pml4;
+
+ pml4->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pml4->page)
+ return -ENOMEM;
+
+ i915_dma_map_px_single(pml4, ppgtt->base.dev);
+
+ return 0;
+}
+
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring,
unsigned entry,
@@ -611,7 +668,7 @@ static void gen8_map_pagetable_range(struct i915_address_space *vm,
kunmap_atomic(pagedir);
}
-static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
+static bool gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
struct i915_pagedirpo *pdp,
uint64_t start, uint64_t length,
bool dead)
@@ -620,14 +677,23 @@ static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
struct i915_pagedir *pd;
struct i915_pagetab *pt;
uint64_t temp;
- uint32_t pdpe, pde;
+ uint32_t pdpe, pde, orig_start = start;
BUG_ON(!pdp);
+
+ if (pdp->zombie) {
+ free_pdp_single(pdp, dev);
+		trace_i915_pagedirpo_destroy(vm, 0,
+					     orig_start & GENMASK_ULL(63, GEN8_PML4E_SHIFT),
+					     GEN8_PML4E_SHIFT);
+ return true;
+ }
+
if (!pdp->pagedirs) {
WARN(!bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev)),
"Page directory leak detected\n");
/* If pagedirs are already free, there is nothing to do.*/
- return;
+ return false;
}
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
@@ -716,8 +782,18 @@ static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
}
}
- if (dead && bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev)))
- free_pdp_single(pdp, dev);
+ if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev))) {
+ if (!dead) {
+ pdp->zombie = 1;
+ } else {
+ free_pdp_single(pdp, dev);
+			trace_i915_pagedirpo_destroy(vm, 0,
+						     orig_start & GENMASK_ULL(63, GEN8_PML4E_SHIFT),
+						     GEN8_PML4E_SHIFT);
+ }
+ return true;
+ }
+ return false;
}
static void gen8_teardown_va_range_4lvl(struct i915_address_space *vm,
@@ -725,19 +801,49 @@ static void gen8_teardown_va_range_4lvl(struct i915_address_space *vm,
uint64_t start, uint64_t length,
bool dead)
{
- BUG();
+ struct i915_pagedirpo *pdp;
+ uint64_t temp, pml4e;
+
+ gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+ if (!pdp)
+ continue;
+
+		if (gen8_teardown_va_range_3lvl(vm, pdp, start, length, dead)) {
+			clear_bit(pml4e, pml4->used_pml4es);
+			pml4->pdps[pml4e] = NULL;
+		} else {
+			WARN_ON(!test_bit(pml4e, pml4->used_pml4es) && !pdp->zombie);
+			WARN_ON(test_bit(pml4e, pml4->used_pml4es) && pdp->zombie);
+		}
+ }
}
-static void gen8_teardown_va_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
+static void __gen8_teardown_va_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length,
+ bool dead)
{
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- if (!HAS_48B_PPGTT(vm->dev))
- gen8_teardown_va_range_3lvl(vm, &ppgtt->pdp, start, length, false);
- else
- gen8_teardown_va_range_4lvl(vm, &ppgtt->pml4, start, length, false);
+ if (!HAS_48B_PPGTT(vm->dev)) {
+ gen8_teardown_va_range_3lvl(vm, &ppgtt->pdp, start, length, dead);
+ if (dead) {
+ WARN_ON(!bitmap_empty(ppgtt->pdp.used_pdpes, I915_PDPES_PER_PDP(vm->dev)));
+ __pdp_fini(&ppgtt->pdp);
+ }
+ } else {
+ gen8_teardown_va_range_4lvl(vm, &ppgtt->pml4, start, length, dead);
+ if (dead) {
+ WARN_ON(!bitmap_empty(ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4));
+ pml4_fini(&ppgtt->pml4);
+ }
+ }
+}
+
+static void gen8_teardown_va_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ __gen8_teardown_va_range(vm, start, length, false);
}
static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
@@ -745,10 +851,12 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
trace_i915_va_teardown(&ppgtt->base,
ppgtt->base.start, ppgtt->base.total,
VM_TO_TRACE_NAME(&ppgtt->base));
- gen8_teardown_va_range_3lvl(&ppgtt->base, &ppgtt->pdp,
- ppgtt->base.start, ppgtt->base.total,
- true);
- BUG_ON(ppgtt->pdp.pagedirs); /* FIXME: 48b */
+ __gen8_teardown_va_range(&ppgtt->base,
+ ppgtt->base.start, ppgtt->base.total, true);
+ if (!HAS_48B_PPGTT(ppgtt->base.dev))
+ BUG_ON(ppgtt->pdp.pagedirs);
+ else
+ BUG_ON(ppgtt->pml4.page);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1029,12 +1137,81 @@ err_out:
return ret;
}
-static int __noreturn gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
- struct i915_pml4 *pml4,
- uint64_t start,
- uint64_t length)
+static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
+ struct i915_pml4 *pml4,
+ uint64_t start,
+ uint64_t length)
{
- BUG();
+ DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_pagedirpo *pdp;
+ const uint64_t orig_start = start;
+ const uint64_t orig_length = length;
+ uint64_t temp, pml4e;
+
+ /* Do the pml4 allocations first, so we don't need to track the newly
+ * allocated tables below the pdp */
+ bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
+
+ /* The pagedirectory and pagetable allocations are done in the shared 3
+ * and 4 level code. Just allocate the pdps.
+ */
+ gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+ if (!pdp) {
+ WARN_ON(test_bit(pml4e, pml4->used_pml4es));
+ pdp = alloc_pdp_single(ppgtt, pml4);
+ if (IS_ERR(pdp))
+ goto err_alloc;
+
+ pml4->pdps[pml4e] = pdp;
+ set_bit(pml4e, new_pdps);
+ trace_i915_pagedirpo_alloc(&ppgtt->base, pml4e,
+ pml4e << GEN8_PML4E_SHIFT,
+ GEN8_PML4E_SHIFT);
+
+ } else {
+ WARN(!pdp->zombie &&
+ !test_bit(pml4e, pml4->used_pml4es), "%lld %p", pml4e, vm);
+ }
+ }
+
+ WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
+ "The allocation has spanned more than 512GB. "
+ "It is highly likely this is incorrect.");
+
+ start = orig_start;
+ length = orig_length;
+
+ gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+ int ret;
+
+ BUG_ON(!pdp);
+
+ ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
+ if (ret)
+ goto err_out;
+ }
+
+ bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
+ GEN8_PML4ES_PER_PML4);
+
+ for_each_set_bit(pml4e, pml4->used_pml4es, GEN8_PML4ES_PER_PML4)
+ pml4->pdps[pml4e]->zombie = 0;
+
+ return 0;
+
+err_out:
+	/* This will teardown more than we allocated. It should be fine, and
+	 * makes code simpler. */
+	start = orig_start;
+	length = orig_length;
+	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e)
+		gen8_teardown_va_range_3lvl(vm, pdp, start, length, false);
+err_alloc:
+	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
+		free_pdp_single(pml4->pdps[pml4e], vm->dev);
+	return -ENOMEM;
}
static int gen8_alloc_va_range(struct i915_address_space *vm,
@@ -1043,16 +1220,19 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
struct i915_hw_ppgtt *ppgtt =
container_of(vm, struct i915_hw_ppgtt, base);
- if (!HAS_48B_PPGTT(vm->dev))
- return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
- else
+ if (HAS_48B_PPGTT(vm->dev))
return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
+ else
+ return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}
static void gen8_ppgtt_fini_common(struct i915_hw_ppgtt *ppgtt)
{
free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
- free_pdp_single(&ppgtt->pdp, ppgtt->base.dev);
+ if (HAS_48B_PPGTT(ppgtt->base.dev))
+ pml4_fini(&ppgtt->pml4);
+ else
+ free_pdp_single(&ppgtt->pdp, ppgtt->base.dev);
}
/**
@@ -1076,7 +1256,13 @@ static int gen8_ppgtt_init_common(struct i915_hw_ppgtt *ppgtt, uint64_t size)
ppgtt->enable = gen8_ppgtt_enable;
ppgtt->switch_mm = gen8_mm_switch;
- if (!HAS_48B_PPGTT(ppgtt->base.dev)) {
+ if (HAS_48B_PPGTT(ppgtt->base.dev)) {
+ int ret = pml4_init(ppgtt);
+ if (ret) {
+ free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
+ return ret;
+ }
+ } else {
int ret = __pdp_init(&ppgtt->pdp, false);
if (ret) {
free_pt_scratch(ppgtt->scratch_pd, ppgtt->base.dev);
@@ -1084,8 +1270,8 @@ static int gen8_ppgtt_init_common(struct i915_hw_ppgtt *ppgtt, uint64_t size)
}
ppgtt->switch_mm = gen8_mm_switch;
- } else
- BUG(); /* Not yet implemented */
+ trace_i915_pagedirpo_alloc(&ppgtt->base, 0, 0, GEN8_PML4E_SHIFT);
+ }
return 0;
}
@@ -103,6 +103,7 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t;
#endif
#define GEN8_PML4ES_PER_PML4 512
#define GEN8_PML4E_SHIFT 39
+#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT 30
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
* tables */
@@ -226,6 +227,7 @@ struct i915_pagedirpo {
dma_addr_t daddr;
unsigned long *used_pdpes;
struct i915_pagedir **pagedirs;
+ unsigned zombie:1;
};
struct i915_pml4 {
@@ -233,6 +235,7 @@ struct i915_pml4 {
dma_addr_t daddr;
DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
struct i915_pagedirpo *pdps[GEN8_PML4ES_PER_PML4];
+ /* Don't bother tracking zombie. Just always leave it around */
};
struct i915_address_space {
@@ -455,9 +458,18 @@ static inline size_t gen6_pde_count(uint32_t addr, uint32_t length)
temp = min(temp, length), \
start += temp, length -= temp)
+#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter)	\
+	for (iter = gen8_pml4e_index(start);				\
+	     length > 0 && iter < GEN8_PML4ES_PER_PML4 &&		\
+	     (pdp = (pml4)->pdps[iter], 1);				\
+	     ++iter,							\
+	     temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start,	\
+	     temp = min(temp, length), start += temp, length -= temp)
+
#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
gen8_for_each_pdpe_e(pd, pdp, start, length, temp, iter, I915_PDPES_PER_PDP(dev))
+
/* Clamp length to the next pagetab boundary */
static inline uint64_t gen8_clamp_pt(uint64_t start, uint64_t length)
{
@@ -495,7 +507,7 @@ static inline uint32_t gen8_pdpe_index(uint64_t address)
static inline uint32_t gen8_pml4e_index(uint64_t address)
{
- BUG();
+ return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}
static inline size_t gen8_pte_count(uint64_t addr, uint64_t length)
The code for 4lvl works just as one would expect, and nicely it is able to call into the existing 3lvl page table code to handle all of the lower levels. PML4 has no special attributes. We do not track its zombie status because there will always be a PML4. So simply initialize it at creation, and destroy it at teardown. (A similar argument can be made for PDPs when not using sparse addresses). Almost none of the fanciness here will exercised since the switch isn't flipped until later. Signed-off-by: Ben Widawsky <ben@bwidawsk.net> --- drivers/gpu/drm/i915/i915_gem_gtt.c | 246 +++++++++++++++++++++++++++++++----- drivers/gpu/drm/i915/i915_gem_gtt.h | 14 +- 2 files changed, 229 insertions(+), 31 deletions(-)