@@ -1124,6 +1124,14 @@
#define GEN12_GAM_DONE _MMIO(0xcf68)
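+/*
+ * Two-dword selective TLB invalidation descriptor: DESC0 carries the low
+ * address bits, the range mask and the valid bit; DESC1 carries the upper
+ * 32 address bits.
+ */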
+#define XEHPSDV_TLB_INV_DESC0 _MMIO(0xcf7c)
+#define XEHPSDV_TLB_INV_DESC0_ADDR_LO REG_GENMASK(31, 12)
+#define XEHPSDV_TLB_INV_DESC0_ADDR_MASK REG_GENMASK(8, 3)
+#define XEHPSDV_TLB_INV_DESC0_G REG_GENMASK(2, 1)
+#define XEHPSDV_TLB_INV_DESC0_VALID REG_BIT(0)
+#define XEHPSDV_TLB_INV_DESC1 _MMIO(0xcf80)
+#define XEHPSDV_TLB_INV_DESC0_ADDR_HI REG_GENMASK(31, 0)
+
#define GEN7_HALF_SLICE_CHICKEN1 _MMIO(0xe100) /* IVB GT1 + VLV */
#define GEN8_HALF_SLICE_CHICKEN1 MCR_REG(0xe100)
#define GEN7_MAX_PS_THREAD_DEP (8 << 12)
@@ -157,6 +157,58 @@ void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno)
}
}
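+
+/*
+ * The hardware can only invalidate naturally aligned power-of-two ranges, so
+ * compute the smallest such range covering [*addr, *addr + length): *addr is
+ * rounded down and the widened length is returned.
+ */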
+static u64 tlb_page_selective_size(u64 *addr, u64 length)
+{
+ const u64 end = *addr + length;
+ u64 start;
+
+ /*
+ * The minimum invalidation size the hardware expects for a 2MB page
+ * is 16MB.
+ */
+ length = max_t(u64, roundup_pow_of_two(length), SZ_4K);
+ if (length >= SZ_2M)
+ length = max_t(u64, SZ_16M, length);
+
+ /*
+ * If the start address is not aligned to the length, we must widen the
+ * invalidation: keep doubling the length until the naturally aligned
+ * block of that size covers the whole requested range.
+ */
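+ /*
+ * For example (illustrative numbers only): addr 0x7000, length 0x3000 is
+ * first rounded up to a 0x4000 length, then doubled twice until the
+ * aligned block [0, 0x10000) covers the requested [0x7000, 0xa000) range.
+ */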
+ start = round_down(*addr, length);
+ while (start + length < end) {
+ length <<= 1;
+ start = round_down(*addr, length);
+ }
+
+ *addr = start;
+ return length;
+}
+
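+/**
+ * intel_gt_invalidate_tlb_range - selectively invalidate TLB entries
+ * @gt: the GT to invalidate
+ * @start: start of the virtual address range
+ * @length: length of the range, in bytes
+ *
+ * Ask the GuC to invalidate only the TLB entries covering @start/@length,
+ * after rounding the range up to what the hardware supports.
+ *
+ * Return: true on success (or if the GT is wedged), false otherwise.
+ */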
+bool intel_gt_invalidate_tlb_range(struct intel_gt *gt,
+ u64 start, u64 length)
+{
+ struct intel_guc *guc = &gt->uc.guc;
+ intel_wakeref_t wakeref;
+ u64 size, vm_total;
+ bool ret = true;
+
+ if (intel_gt_is_wedged(gt))
+ return true;
+
+ vm_total = BIT_ULL(RUNTIME_INFO(gt->i915)->ppgtt_size);
+ /* Align start and length */
+ size = min_t(u64, vm_total, tlb_page_selective_size(&start, length));
+
+ with_intel_gt_pm_if_awake(gt, wakeref)
+ ret = intel_guc_invalidate_tlb_page_selective(guc,
+ INTEL_GUC_TLB_INVAL_MODE_HEAVY,
+ start, size) == 0;
+
+ return ret;
+}
+
void intel_gt_init_tlb(struct intel_gt *gt)
{
mutex_init(&gt->tlb.invalidate_lock);
@@ -12,6 +12,7 @@
#include "intel_gt_types.h"
void intel_gt_invalidate_tlb_full(struct intel_gt *gt, u32 seqno);
+bool intel_gt_invalidate_tlb_range(struct intel_gt *gt, u64 start, u64 length);
void intel_gt_init_tlb(struct intel_gt *gt);
void intel_gt_fini_tlb(struct intel_gt *gt);
@@ -382,10 +382,45 @@ static int invalidate_full(void *arg)
return err;
}
+static void tlbinv_range(struct i915_address_space *vm, u64 addr, u64 length)
+{
+ if (!intel_gt_invalidate_tlb_range(vm->gt, addr, length))
+ pr_err("range invalidate failed\n");
+}
+
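+/*
+ * Probe for selective invalidation support by attempting to invalidate the
+ * whole address space; platforms without it are expected to fail here.
+ */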
+static bool has_invalidate_range(struct intel_gt *gt)
+{
+ intel_wakeref_t wf;
+ bool result = false;
+
+ with_intel_gt_pm(gt, wf)
+ result = intel_gt_invalidate_tlb_range(gt, 0, gt->vm->total);
+
+ return result;
+}
+
+static int invalidate_range(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int err;
+
+ if (!has_invalidate_range(gt))
+ return 0;
+
+ err = mem_tlbinv(gt, create_smem, tlbinv_range);
+ if (err == 0)
+ err = mem_tlbinv(gt, create_lmem, tlbinv_range);
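+ /* A missing/unsupported memory region is not a test failure */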
+ if (err == -ENODEV || err == -ENXIO)
+ err = 0;
+
+ return err;
+}
+
int intel_tlb_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(invalidate_full),
+ SUBTEST(invalidate_range),
};
struct intel_gt *gt;
unsigned int i;
@@ -403,3 +438,56 @@ int intel_tlb_live_selftests(struct drm_i915_private *i915)
return 0;
}
+
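+/*
+ * Exhaustively exercise tlb_page_selective_size(): for every power-of-two
+ * start, size and misalignment, the rounded range must still cover the
+ * requested range.
+ */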
+static int tlb_page_size(void *arg)
+{
+ int start, size, offset;
+
+ for (start = 0; start < 57; start++) {
+ for (size = 0; size <= 57 - start; size++) {
+ for (offset = 0; offset <= size; offset++) {
+ u64 len = BIT_ULL(size);
+ u64 addr = BIT_ULL(start) + len - BIT_ULL(offset);
+ u64 expected_start = addr;
+ u64 expected_end = addr + len - 1;
+ int err = 0;
+
+ if (addr + len < addr)
+ continue;
+
+ len = tlb_page_selective_size(&addr, len);
+ if (addr > expected_start) {
+ pr_err("(start:%d, size:%d, offset:%d, range:[%llx, %llx]) invalidate range:[%llx + %llx] after start:%llx\n",
+ start, size, offset,
+ expected_start, expected_end,
+ addr, len,
+ expected_start);
+ err = -EINVAL;
+ }
+
+ if (addr + len - 1 < expected_end) {
+ pr_err("(start:%d, size:%d, offset:%d, range:[%llx, %llx]) invalidate range:[%llx + %llx] before end:%llx\n",
+ start, size, offset,
+ expected_start, expected_end,
+ addr, len,
+ expected_end);
+ err = -EINVAL;
+ }
+
+ if (err)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int intel_tlb_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(tlb_page_size),
+ };
+
+ return i915_subtests(tests, NULL);
+}
@@ -21,6 +21,7 @@ selftest(fence, i915_sw_fence_mock_selftests)
selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
+selftest(tlb, intel_tlb_mock_selftests)
selftest(ring, intel_ring_mock_selftests)
selftest(engine, intel_engine_cs_mock_selftests)
selftest(timelines, intel_timeline_mock_selftests)