
[v1,02/10] drm/i915: get memory ready for pvmmio

Message ID 1539238452-4389-2-git-send-email-xiaolin.zhang@intel.com (mailing list archive)
State New, archived
Series i915 pvmmio to improve GVTg performance

Commit Message

Xiaolin Zhang Oct. 11, 2018, 6:14 a.m. UTC
To enable the pvmmio feature, we need to prepare one 4K shared page
that will be accessed by both the guest and the backend i915 driver.

The guest i915 driver allocates one page of memory, and its guest
physical address is then passed to the backend i915 driver through a
PVINFO register, so that the backend i915 driver can access this shared
page via the hypervisor's read_gpa functionality and exchange shared
data without the cost of a hypervisor trap per access.
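
For reference, the backend side is not part of this patch; the sketch
below is only a hypothetical illustration of how a backend could pick up
the published address, assuming the GVT-g read_gpa helper
intel_gvt_hypervisor_read_gpa() and the vgpu_vreg_t()/vgtif_reg()
accessors (fetch_shared_page() itself is a made-up name):

/*
 * Hypothetical backend-side sketch (not part of this patch): read the
 * shared page GPA published by the guest through PVINFO and pull a
 * copy of the whole page in one read_gpa call instead of trapping
 * individual MMIO accesses.
 */
static int fetch_shared_page(struct intel_vgpu *vgpu,
			     struct gvt_shared_page *buf)
{
	u64 gpa;

	/* The guest publishes the GPA as two 32-bit PVINFO fields. */
	gpa = vgpu_vreg_t(vgpu, vgtif_reg(shared_page_gpa.lo)) |
	      ((u64)vgpu_vreg_t(vgpu, vgtif_reg(shared_page_gpa.hi)) << 32);

	/* One bulk hypervisor read replaces several trapped reads. */
	return intel_gvt_hypervisor_read_gpa(vgpu, gpa, buf, sizeof(*buf));
}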

v1: addressed RFC comment by moving both shared_page_lock and shared_page
into the i915_virtual_gpu structure
v0: RFC

Signed-off-by: Xiaolin Zhang <xiaolin.zhang@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c    |  4 ++++
 drivers/gpu/drm/i915/i915_drv.h    |  2 ++
 drivers/gpu/drm/i915/i915_pvinfo.h | 24 +++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_vgpu.c   | 19 ++++++++++++++++++-
 4 files changed, 47 insertions(+), 2 deletions(-)
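
The fields added to the shared page below (elsp_data, reg_addr,
disable_irq, pv_ppgtt) are only defined in this patch; filling them is
left outside it. Purely as a hypothetical illustration of the intended
use of shared_page_lock, a guest-side helper could look like the
following (pv_write_elsp() is a made-up name, not code from this
series):

/*
 * Hypothetical guest-side illustration (not part of this patch): fill
 * the shared page under shared_page_lock so the backend can read the
 * whole group of values back with a single read_gpa.
 */
static void pv_write_elsp(struct drm_i915_private *dev_priv,
			  i915_reg_t reg, const u32 *descs)
{
	struct gvt_shared_page *sp = dev_priv->vgpu.shared_page;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&dev_priv->vgpu.shared_page_lock, flags);
	sp->reg_addr = i915_mmio_reg_offset(reg);
	for (i = 0; i < 4; i++)
		sp->elsp_data[i] = descs[i];
	spin_unlock_irqrestore(&dev_priv->vgpu.shared_page_lock, flags);
}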

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 19302342..72669f4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -987,6 +987,8 @@  static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
 
 	intel_teardown_mchbar(dev_priv);
 	pci_iounmap(pdev, dev_priv->regs);
+	if (intel_vgpu_active(dev_priv) && dev_priv->vgpu.shared_page)
+		free_page((unsigned long)dev_priv->vgpu.shared_page);
 }
 
 /**
@@ -1029,6 +1031,8 @@  static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	return 0;
 
 err_uncore:
+	if (intel_vgpu_active(dev_priv) && dev_priv->vgpu.shared_page)
+		free_page((unsigned long)dev_priv->vgpu.shared_page);
 	intel_uncore_fini(dev_priv);
 err_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d22154a..2c131d4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1345,6 +1345,8 @@  struct i915_virtual_gpu {
 	bool active;
 	u32 caps;
 	u32 pv_caps;
+	spinlock_t shared_page_lock;
+	struct gvt_shared_page *shared_page;
 };
 
 /* used in computing the new watermarks state */
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index 26709e8..179d558 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -49,6 +49,24 @@  enum vgt_g2v_type {
 	VGT_G2V_MAX,
 };
 
+struct pv_ppgtt_update {
+	u64 pdp;
+	u64 start;
+	u64 length;
+	u32 cache_level;
+};
+
+/*
+ * shared page(4KB) between gvt and VM, could be allocated by guest driver
+ * or a fixed location in PCI bar 0 region
+ */
+struct gvt_shared_page {
+	u32 elsp_data[4];
+	u32 reg_addr;
+	u32 disable_irq;
+	struct pv_ppgtt_update pv_ppgtt;
+};
+
 #define VGPU_PVMMIO(vgpu) vgpu_vreg_t(vgpu, vgtif_reg(enable_pvmmio))
 
 /*
@@ -121,8 +139,12 @@  struct vgt_if {
 	u32 execlist_context_descriptor_lo;
 	u32 execlist_context_descriptor_hi;
 	u32 enable_pvmmio;
+	struct {
+		u32 lo;
+		u32 hi;
+	} shared_page_gpa;
 
-	u32  rsv7[0x200 - 25];    /* pad to one page */
+	u32  rsv7[0x200 - 27];    /* pad to one page */
 } __packed;
 
 #define vgtif_reg(x) \
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 907bbd2..609eefe 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -62,6 +62,7 @@  void i915_check_vgpu(struct drm_i915_private *dev_priv)
 {
 	u64 magic;
 	u16 version_major;
+	u64 shared_page_gpa;
 
 	BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 
@@ -91,7 +92,23 @@  void i915_check_vgpu(struct drm_i915_private *dev_priv)
 			dev_priv->vgpu.pv_caps);
 	dev_priv->vgpu.pv_caps = __raw_i915_read32(dev_priv,
 			vgtif_reg(enable_pvmmio));
-
+	if (intel_vgpu_active(dev_priv) && dev_priv->vgpu.pv_caps) {
+		dev_priv->vgpu.shared_page =  (struct gvt_shared_page *)
+				get_zeroed_page(GFP_KERNEL);
+		if (!dev_priv->vgpu.shared_page) {
+			DRM_ERROR("out of memory for shared page memory\n");
+			return;
+		}
+		shared_page_gpa = __pa(dev_priv->vgpu.shared_page);
+		__raw_i915_write32(dev_priv, vgtif_reg(shared_page_gpa.lo),
+				lower_32_bits(shared_page_gpa));
+		__raw_i915_write32(dev_priv, vgtif_reg(shared_page_gpa.hi),
+				upper_32_bits(shared_page_gpa));
+
+		spin_lock_init(&dev_priv->vgpu.shared_page_lock);
+
+		DRM_INFO("VGPU shared page enabled\n");
+	}
 	DRM_INFO("Virtual GPU for Intel GVT-g detected with pvmmio 0x%x\n",
 			dev_priv->vgpu.pv_caps);
 }