@@ -358,7 +358,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
* enabled/disabled virtual pipes.
*
*/
-void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt, bool use_hw_vblank)
{
struct intel_gvt_irq *irq = &gvt->irq;
struct intel_vgpu *vgpu;
@@ -369,25 +369,29 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
for_each_active_vgpu(gvt, vgpu, id) {
for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) {
if (pipe_is_enabled(vgpu, pipe)) {
- found = true;
- break;
+ if (use_hw_vblank) {
+ wake_up_process(gvt->vblank_thread[pipe]);
+ } else {
+ found = true;
+ break;
+ }
}
}
- if (found)
- break;
}
- /* all the pipes are disabled */
- if (!found)
- hrtimer_cancel(&irq->vblank_timer.timer);
- else
- hrtimer_start(&irq->vblank_timer.timer,
- ktime_add_ns(ktime_get(), irq->vblank_timer.period),
- HRTIMER_MODE_ABS);
+ if (!use_hw_vblank) {
+ /* all the pipes are disabled */
+ if (!found)
+ hrtimer_cancel(&irq->vblank_timer.timer);
+ else
+ hrtimer_start(&irq->vblank_timer.timer,
+ ktime_add_ns(ktime_get(), irq->vblank_timer.period),
+ HRTIMER_MODE_ABS);
+ }
mutex_unlock(&gvt->lock);
}
-static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
+void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_irq *irq = &vgpu->irq;
@@ -187,12 +187,13 @@ static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
}
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
-void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
+void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt, bool use_hw_vblank);
int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
+void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe);
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe);
@@ -250,6 +250,94 @@ static void init_device_info(struct intel_gvt *gvt)
info->msi_cap_offset = pdev->msi_cap;
}
+/*
+ * Per-CRTC vblank service thread for pipe A: repeatedly arms a one-shot HW
+ * vblank event on the physical CRTC, sleeps until it fires, then injects a
+ * virtual vblank into every active vGPU.
+ */
+static int emulate_crtc_a_vblank_thread(void *data)
+{
+	struct intel_gvt *gvt = (struct intel_gvt *)data;
+	struct drm_device *dev = &gvt->dev_priv->drm;
+	struct intel_vgpu *vgpu;
+	/* use the completion embedded in gvt, not a stack copy of it */
+	struct completion *vblank_done = &gvt->vblank_done[PIPE_A];
+	int ret;
+	int id;
+
+	DRM_DEBUG_KMS("vblank service thread a start\n");
+
+	while (!kthread_should_stop()) {
+		init_completion(vblank_done);
+
+		ret = intel_gvt_register_HW_vblank_event(dev, vblank_done, PIPE_A);
+		if (ret) {
+			/*
+			 * Re-check kthread_should_stop() instead of spinning
+			 * forever on goto; NOTE(review): consider a short
+			 * back-off (e.g. msleep) here to avoid busy-looping
+			 * while the CRTC is unavailable.
+			 */
+			continue;
+		}
+
+		/* wait for the HW vblank to fire */
+		wait_for_completion_interruptible(vblank_done);
+
+		/* inject the vblank to gvt */
+		for_each_active_vgpu(gvt, vgpu, id)
+			emulate_vblank_on_pipe(vgpu, PIPE_A);
+	}
+
+	return 0;
+}
+
+/*
+ * Per-CRTC vblank service thread for pipe B: repeatedly arms a one-shot HW
+ * vblank event on the physical CRTC, sleeps until it fires, then injects a
+ * virtual vblank into every active vGPU.
+ */
+static int emulate_crtc_b_vblank_thread(void *data)
+{
+	struct intel_gvt *gvt = (struct intel_gvt *)data;
+	struct drm_device *dev = &gvt->dev_priv->drm;
+	struct intel_vgpu *vgpu;
+	/* use the completion embedded in gvt, not a stack copy of it */
+	struct completion *vblank_done = &gvt->vblank_done[PIPE_B];
+	int ret;
+	int id;
+
+	DRM_DEBUG_KMS("vblank service thread b start\n");
+
+	while (!kthread_should_stop()) {
+		init_completion(vblank_done);
+
+		ret = intel_gvt_register_HW_vblank_event(dev, vblank_done, PIPE_B);
+		if (ret) {
+			/*
+			 * Re-check kthread_should_stop() instead of spinning
+			 * forever on goto; NOTE(review): consider a short
+			 * back-off (e.g. msleep) here to avoid busy-looping
+			 * while the CRTC is unavailable.
+			 */
+			continue;
+		}
+
+		/* wait for the HW vblank to fire */
+		wait_for_completion_interruptible(vblank_done);
+
+		/* inject the vblank to gvt */
+		for_each_active_vgpu(gvt, vgpu, id)
+			emulate_vblank_on_pipe(vgpu, PIPE_B);
+	}
+
+	return 0;
+}
+
+/*
+ * Per-CRTC vblank service thread for pipe C: repeatedly arms a one-shot HW
+ * vblank event on the physical CRTC, sleeps until it fires, then injects a
+ * virtual vblank into every active vGPU.
+ */
+static int emulate_crtc_c_vblank_thread(void *data)
+{
+	struct intel_gvt *gvt = (struct intel_gvt *)data;
+	struct drm_device *dev = &gvt->dev_priv->drm;
+	struct intel_vgpu *vgpu;
+	/* use the completion embedded in gvt, not a stack copy of it */
+	struct completion *vblank_done = &gvt->vblank_done[PIPE_C];
+	int ret;
+	int id;
+
+	DRM_DEBUG_KMS("vblank service thread c start\n");
+
+	while (!kthread_should_stop()) {
+		init_completion(vblank_done);
+
+		ret = intel_gvt_register_HW_vblank_event(dev, vblank_done, PIPE_C);
+		if (ret) {
+			/*
+			 * Re-check kthread_should_stop() instead of spinning
+			 * forever on goto; NOTE(review): consider a short
+			 * back-off (e.g. msleep) here to avoid busy-looping
+			 * while the CRTC is unavailable.
+			 */
+			continue;
+		}
+
+		/* wait for the HW vblank to fire */
+		wait_for_completion_interruptible(vblank_done);
+
+		/* inject the vblank to gvt */
+		for_each_active_vgpu(gvt, vgpu, id)
+			emulate_vblank_on_pipe(vgpu, PIPE_C);
+	}
+
+	return 0;
+}
+
static int gvt_service_thread(void *data)
{
struct intel_gvt *gvt = (struct intel_gvt *)data;
@@ -285,6 +373,9 @@ static int gvt_service_thread(void *data)
static void clean_service_thread(struct intel_gvt *gvt)
{
kthread_stop(gvt->service_thread);
+ kthread_stop(gvt->vblank_thread[PIPE_A]);
+ kthread_stop(gvt->vblank_thread[PIPE_B]);
+ kthread_stop(gvt->vblank_thread[PIPE_C]);
}
static int init_service_thread(struct intel_gvt *gvt)
@@ -297,6 +388,27 @@ static int init_service_thread(struct intel_gvt *gvt)
gvt_err("fail to start service thread.\n");
return PTR_ERR(gvt->service_thread);
}
+
+	gvt->vblank_thread[PIPE_A] = kthread_create(emulate_crtc_a_vblank_thread,
+				gvt, "emulate_crtc_a_vblank_thread");
+	if (IS_ERR(gvt->vblank_thread[PIPE_A])) {
+		gvt_err("fail to create crtc a vblank thread.\n");
+		return PTR_ERR(gvt->vblank_thread[PIPE_A]);
+	}
+
+	gvt->vblank_thread[PIPE_B] = kthread_create(emulate_crtc_b_vblank_thread,
+				gvt, "emulate_crtc_b_vblank_thread");
+	if (IS_ERR(gvt->vblank_thread[PIPE_B])) {
+		gvt_err("fail to create crtc b vblank thread.\n");
+		/* don't leak the already-created pipe A thread */
+		kthread_stop(gvt->vblank_thread[PIPE_A]);
+		return PTR_ERR(gvt->vblank_thread[PIPE_B]);
+	}
+
+	gvt->vblank_thread[PIPE_C] = kthread_create(emulate_crtc_c_vblank_thread,
+				gvt, "emulate_crtc_c_vblank_thread");
+	if (IS_ERR(gvt->vblank_thread[PIPE_C])) {
+		gvt_err("fail to create crtc c vblank thread.\n");
+		/* don't leak the already-created pipe A/B threads */
+		kthread_stop(gvt->vblank_thread[PIPE_A]);
+		kthread_stop(gvt->vblank_thread[PIPE_B]);
+		return PTR_ERR(gvt->vblank_thread[PIPE_C]);
+	}
return 0;
}
@@ -356,6 +356,9 @@ struct intel_gvt {
/* vGPU plane assignment */
struct assigned_plane assigned_plane[I915_MAX_PIPES][I915_MAX_PLANES];
+
+ struct task_struct *vblank_thread[I915_MAX_PIPES];
+ struct completion vblank_done[I915_MAX_PIPES];
};
static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
@@ -449,10 +449,12 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE;
else
vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE;
+
/* vgpu_lock already hold by emulate mmio r/w */
mutex_unlock(&vgpu->vgpu_lock);
- intel_gvt_check_vblank_emulation(vgpu->gvt);
+ intel_gvt_check_vblank_emulation(vgpu->gvt, true);
mutex_lock(&vgpu->vgpu_lock);
+
return 0;
}
@@ -633,6 +633,60 @@ void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
ops->check_pending_irq(vgpu);
}
+/*
+ * Deliberate no-op completion_release callback: the completion this event
+ * points at is owned by struct intel_gvt (gvt->vblank_done[]), so there is
+ * nothing to free here.  NOTE(review): presumably a non-NULL
+ * completion_release is expected by the drm event core whenever
+ * base.completion is set — confirm against drm_send_event().
+ */
+static void release_vblank_event(struct completion *completion)
+{
+	;
+}
+
+/**
+ * intel_gvt_register_HW_vblank_event - arm a one-shot HW vblank event
+ * @dev: drm device owning the physical CRTCs
+ * @vblank_done: completion to be signalled when the next vblank on @pipe fires
+ * @pipe: physical pipe index
+ *
+ * Queues a drm_pending_vblank_event that is delivered via @vblank_done
+ * (base.completion) instead of a drm file descriptor.
+ *
+ * Returns: 0 on success, negative error code on failure.
+ */
+int intel_gvt_register_HW_vblank_event(struct drm_device *dev,
+		struct completion *vblank_done, int pipe)
+{
+	struct drm_pending_vblank_event *e;
+	struct drm_crtc *crtc;
+	unsigned long flags;
+	int ret;
+
+	DRM_DEBUG_KMS("%s begin with pipe %d, com %p, dev %p\n",
+		      __func__, pipe, vblank_done, dev);
+
+	crtc = drm_crtc_from_index(dev, pipe);
+	if (!crtc) {
+		DRM_ERROR("cannot get crtc %d\n", pipe);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* hold a vblank reference so the vblank interrupt stays enabled */
+	ret = drm_crtc_vblank_get(crtc);
+	if (ret) {
+		DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n",
+			  pipe, ret);
+		goto out;
+	}
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e) {
+		drm_crtc_vblank_put(crtc);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* create a vblank event delivered via completion, not a drm file */
+	e->event.base.type = DRM_EVENT_VBLANK;
+	e->event.base.length = sizeof(e->event);
+	e->pipe = pipe;
+	e->base.completion = vblank_done;
+	e->base.completion_release = release_vblank_event;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	e->sequence = drm_crtc_accurate_vblank_count(crtc) + 1;
+
+	/* add the event to the vblank list; completed from the vblank IRQ */
+	list_add_tail(&e->base.link, &dev->vblank_event_list);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	DRM_DEBUG_KMS("%s: success\n", __func__);
+out:
+	return ret;
+}
+
static void init_events(
struct intel_gvt_irq *irq)
{
@@ -213,6 +213,8 @@ struct intel_gvt_irq {
int intel_gvt_init_irq(struct intel_gvt *gvt);
void intel_gvt_clean_irq(struct intel_gvt *gvt);
+int intel_gvt_register_HW_vblank_event(struct drm_device *dev,
+ struct completion *vblank_done, int pipe);
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event);
For each physical CRTC, GVT-g has a kernel thread that gets the HW
vblank event on time and injects the interrupt event into the vGPUs to
which the display planes of that CRTC are assigned.

Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
---
 drivers/gpu/drm/i915/gvt/display.c   |  30 ++++----
 drivers/gpu/drm/i915/gvt/display.h   |   3 +-
 drivers/gpu/drm/i915/gvt/gvt.c       | 112 +++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/gvt/gvt.h       |   3 +
 drivers/gpu/drm/i915/gvt/handlers.c  |   4 +-
 drivers/gpu/drm/i915/gvt/interrupt.c |  54 +++++++++++++++++
 drivers/gpu/drm/i915/gvt/interrupt.h |   2 +
 7 files changed, 193 insertions(+), 15 deletions(-)