@@ -189,6 +189,8 @@ enum intel_guc_state_capture_event_status {
enum intel_guc_tlb_invalidation_type {
INTEL_GUC_TLB_INVAL_ENGINES = 0x0,
+ INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE = 0x1,
+ INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE_CTX = 0x2,
INTEL_GUC_TLB_INVAL_GUC = 0x3,
};
@@ -431,6 +431,15 @@ int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
int intel_guc_self_cfg32(struct intel_guc *guc, u16 key, u32 value);
int intel_guc_self_cfg64(struct intel_guc *guc, u16 key, u64 value);
+int intel_guc_g2g_register(struct intel_guc *guc);
+
+int intel_guc_invalidate_tlb_page_selective(struct intel_guc *guc,
+ enum intel_guc_tlb_inval_mode mode,
+ u64 start, u64 length);
+int intel_guc_invalidate_tlb_page_selective_ctx(struct intel_guc *guc,
+ enum intel_guc_tlb_inval_mode mode,
+ u64 start, u64 length, u32 ctxid);
+
static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
return intel_uc_fw_is_supported(&guc->fw);
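
For reference, a minimal sketch of how a caller might choose between the new selective entry point and the existing full invalidation. The helper name and the decision logic are assumptions for illustration, not part of the patch:

/* Hypothetical caller, for illustration only. */
static int example_invalidate_range(struct intel_guc *guc, u64 start, u64 length)
{
	u64 vm_total = BIT_ULL(RUNTIME_INFO(guc_to_gt(guc)->i915)->ppgtt_size);

	/* Whole-VM updates: fall back to a full engine TLB invalidation. */
	if (length >= vm_total)
		return intel_guc_invalidate_tlb_engines(guc);

	/* Range must be power-of-two sized, at least 4K and naturally aligned. */
	return intel_guc_invalidate_tlb_page_selective(guc,
						       INTEL_GUC_TLB_INVAL_MODE_HEAVY,
						       start, length);
}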
@@ -4753,22 +4753,12 @@ static bool intel_gt_is_enabled(const struct intel_gt *gt)
return true;
}
-static int guc_send_invalidate_tlb(struct intel_guc *guc,
- enum intel_guc_tlb_invalidation_type type)
+static int guc_send_invalidate_tlb(struct intel_guc *guc, u32 *action, u32 size)
{
struct intel_guc_tlb_wait _wq, *wq = &_wq;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err;
u32 seqno;
- u32 action[] = {
- INTEL_GUC_ACTION_TLB_INVALIDATION,
- 0,
- REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK, type) |
- REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
- INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
- INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
- };
- u32 size = ARRAY_SIZE(action);
init_waitqueue_head(&_wq.wq);
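
The refactor above only changes how the action payload is supplied; the waitqueue plumbing is unchanged. As a reading aid, a rough self-contained sketch of the standard sleep/wake pattern the function is built on (the seqno bookkeeping and the actual H2G send are elided, and the timeout value is an assumption):

static int example_wait_for_tlb_ack(struct intel_guc_tlb_wait *wq)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	long timeout = HZ;	/* illustrative timeout, not the driver's value */

	init_waitqueue_head(&wq->wq);
	add_wait_queue(&wq->wq, &wait);

	/* ...the H2G TLB invalidation action would be sent here... */

	/* Sleep until the G2H handler wakes this waiter or the timeout expires. */
	timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE, timeout);
	remove_wait_queue(&wq->wq, &wait);

	return timeout ? 0 : -ETIME;
}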
@@ -4822,13 +4812,102 @@ static int guc_send_invalidate_tlb(struct intel_guc *guc,
/* Full TLB invalidation */
int intel_guc_invalidate_tlb_engines(struct intel_guc *guc)
{
- return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_ENGINES);
+ u32 action[] = {
+ INTEL_GUC_ACTION_TLB_INVALIDATION,
+ 0,
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK,
+ INTEL_GUC_TLB_INVAL_ENGINES) |
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
+ INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
+ INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+ };
+ u32 size = ARRAY_SIZE(action);
+ return guc_send_invalidate_tlb(guc, action, size);
+}
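
A reading aid for the payload built above, not additional code: the second dword is left at zero here and is presumably overwritten with the wait seqno inside guc_send_invalidate_tlb() before the action is sent.

/*
 *   action[0]  INTEL_GUC_ACTION_TLB_INVALIDATION   H2G action opcode
 *   action[1]  0                                   seqno placeholder (assumed,
 *                                                  filled by the send helper)
 *   action[2]  type | mode | FLUSH_CACHE           invalidation descriptor
 */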
+
+/*
+ * Selective TLB invalidation for an address range:
+ * TLB entries covering the range are invalidated across all engines.
+ */
+int intel_guc_invalidate_tlb_page_selective(struct intel_guc *guc,
+ enum intel_guc_tlb_inval_mode mode,
+ u64 start, u64 length)
+{
+ u64 vm_total = BIT_ULL(RUNTIME_INFO(guc_to_gt(guc)->i915)->ppgtt_size);
+
+ /*
+ * For page-selective invalidations, the address mask encodes log2 of the
+ * number of contiguous 4K PPGTT pages that need to be invalidated.
+ */
+ u32 address_mask = length >= vm_total ? 0 : ilog2(length) - ilog2(SZ_4K);
+ u32 action[] = {
+ INTEL_GUC_ACTION_TLB_INVALIDATION,
+ 0,
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK,
+ INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE) |
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK, mode) |
+ INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+ 0,
+ length >= vm_total ? 1 : lower_32_bits(start),
+ upper_32_bits(start),
+ address_mask,
+ };
+
+ GEM_BUG_ON(length < SZ_4K);
+ GEM_BUG_ON(!is_power_of_2(length));
+ GEM_BUG_ON(!IS_ALIGNED(start, length));
+ GEM_BUG_ON(range_overflows(start, length, vm_total));
+
+ return guc_send_invalidate_tlb(guc, action, ARRAY_SIZE(action));
+}
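
A worked example of the range encoding above, with illustrative values; the numbers follow directly from the expressions in the function, assuming the range fits inside the VM:

u64 start = 0x1a0000;				/* 64K aligned */
u64 length = SZ_64K;				/* power of two, >= SZ_4K */
u32 address_mask = ilog2(length) - ilog2(SZ_4K);	/* 16 - 12 = 4 */

/*
 * action[4] = lower_32_bits(start) = 0x001a0000
 * action[5] = upper_32_bits(start) = 0x0
 * action[6] = address_mask         = 4, i.e. 2^4 contiguous 4K pages
 */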
+
+/*
+ * Selective TLB invalidation for a context:
+ * invalidates all TLB entries for a specific context across all engines.
+ */
+int intel_guc_invalidate_tlb_page_selective_ctx(struct intel_guc *guc,
+ enum intel_guc_tlb_inval_mode mode,
+ u64 start, u64 length, u32 ctxid)
+{
+ u64 vm_total = BIT_ULL(RUNTIME_INFO(guc_to_gt(guc)->i915)->ppgtt_size);
+ u32 address_mask = ilog2(length) - ilog2(SZ_4K);
+ u32 full_range = vm_total == length;
+ u32 action[] = {
+ INTEL_GUC_ACTION_TLB_INVALIDATION,
+ 0,
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK,
+ INTEL_GUC_TLB_INVAL_PAGE_SELECTIVE_CTX) |
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK, mode) |
+ INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+ ctxid,
+ full_range ? 1 : lower_32_bits(start),
+ full_range ? 0 : upper_32_bits(start),
+ full_range ? 0 : address_mask,
+ };
+
+ GEM_BUG_ON(length < SZ_4K);
+ GEM_BUG_ON(!is_power_of_2(length));
+ GEM_BUG_ON(length & GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1));
+ GEM_BUG_ON(!IS_ALIGNED(start, length));
+ GEM_BUG_ON(range_overflows(start, length, vm_total));
+
+ return guc_send_invalidate_tlb(guc, action, ARRAY_SIZE(action));
}
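
The length assertion in the context variant is easier to read with the mask expanded; worked numbers below, derived from the expressions above:

/*
 * GENMASK(ilog2(SZ_16M) - 1, ilog2(SZ_2M) + 1) == GENMASK(23, 22)
 *                                              == SZ_8M | SZ_4M
 *
 * Combined with the power-of-two and >= SZ_4K checks, lengths of
 * 4K..2M and 16M upwards pass, while 4M and 8M are rejected,
 * apparently matching the sizes the GuC interface supports.
 */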
/* GuC TLB Invalidation: Invalidate the TLB's of GuC itself. */
int intel_guc_invalidate_tlb_guc(struct intel_guc *guc)
{
- return guc_send_invalidate_tlb(guc, INTEL_GUC_TLB_INVAL_GUC);
+ u32 action[] = {
+ INTEL_GUC_ACTION_TLB_INVALIDATION,
+ 0,
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_TYPE_MASK,
+ INTEL_GUC_TLB_INVAL_GUC) |
+ REG_FIELD_PREP(INTEL_GUC_TLB_INVAL_MODE_MASK,
+ INTEL_GUC_TLB_INVAL_MODE_HEAVY) |
+ INTEL_GUC_TLB_INVAL_FLUSH_CACHE,
+ };
+ u32 size = ARRAY_SIZE(action);
+ return guc_send_invalidate_tlb(guc, action, size);
}
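
A minimal caller sketch for the GuC-internal invalidation, assuming the firmware is already loaded and running; the helper name and call site are assumptions, not taken from the patch:

static void example_flush_guc_tlb(struct intel_gt *gt)
{
	struct intel_guc *guc = &gt->uc.guc;

	/* Only meaningful once GuC is up and CT communication is enabled. */
	if (intel_guc_is_ready(guc))
		intel_guc_invalidate_tlb_guc(guc);
}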
int intel_guc_deregister_done_process_msg(struct intel_guc *guc,