[v6,1/2] drm/i915/gvt: Save/restore HW status to support GVT suspend/resume

Message ID 20201023081719.772816-1-colin.xu@intel.com (mailing list archive)
State New, archived
Series Enable GVT suspend/resume

Commit Message

Colin Xu Oct. 23, 2020, 8:17 a.m. UTC
This patch saves/restores the necessary GVT state during i915 suspend/resume so
that a GVT-enabled QEMU VM can continue running.

Only GGTT and fence regs are saved/restored for now. GVT saves GGTT
entries on each host_entry update, then restores the saved dirty entries
and re-inits the fence regs in the resume routine.

V2:
- Change kzalloc/kfree to vzalloc/vfree since the space allocated
from kmalloc may not be enough for all saved GGTT entries.
- Keep gvt suspend/resume wrapper in intel_gvt.h/intel_gvt.c and
move the actual implementation to gvt.h/gvt.c. (zhenyu)
- Check gvt config on and active with intel_gvt_active(). (zhenyu)

V3: (zhenyu)
- Incorrect copy length. Should be num entries * entry size.
- Use memcpy_toio()/memcpy_fromio() instead of memcpy for iomem.
- Add F_PM_SAVE flags to indicate which MMIOs to save/restore for PM.

V4:
Rebase.

V5:
Fail intel_gvt_save_ggtt with -ENOMEM if allocating memory to save the
ggtt fails. Free the allocated ggtt_entries on failure.

V6:
Save host entry to per-vGPU gtt.ggtt_mm on each host_entry update.

Signed-off-by: Hang Yuan <hang.yuan@linux.intel.com>
Signed-off-by: Colin Xu <colin.xu@intel.com>
---
 drivers/gpu/drm/i915/gvt/gtt.c      | 75 +++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/gvt/gtt.h      |  9 ++++
 drivers/gpu/drm/i915/gvt/gvt.c      |  8 +++
 drivers/gpu/drm/i915/gvt/gvt.h      |  3 ++
 drivers/gpu/drm/i915/gvt/handlers.c | 39 +++++++++++++--
 drivers/gpu/drm/i915/gvt/mmio.h     |  3 ++
 drivers/gpu/drm/i915/intel_gvt.c    | 15 ++++++
 drivers/gpu/drm/i915/intel_gvt.h    |  5 ++
 8 files changed, 154 insertions(+), 3 deletions(-)

Comments

Zhenyu Wang Oct. 26, 2020, 9:19 a.m. UTC | #1
On 2020.10.23 16:17:19 +0800, Colin Xu wrote:
> This patch save/restore necessary GVT info during i915 suspend/resume so
> that GVT enabled QEMU VM can continue running.
> 
> Only GGTT and fence regs are saved/restored now. GVT will save GGTT
> entries on each host_entry update, restore the saved dirty entries
> and re-init fence regs in resume routine.
> 
> V2:
> - Change kzalloc/kfree to vzalloc/vfree since the space allocated
> from kmalloc may not enough for all saved GGTT entries.
> - Keep gvt suspend/resume wrapper in intel_gvt.h/intel_gvt.c and
> move the actual implementation to gvt.h/gvt.c. (zhenyu)
> - Check gvt config on and active with intel_gvt_active(). (zhenyu)
> 
> V3: (zhenyu)
> - Incorrect copy length. Should be num entries * entry size.
> - Use memcpy_toio()/memcpy_fromio() instead of memcpy for iomem.
> - Add F_PM_SAVE flags to indicate which MMIOs to save/restore for PM.
> 
> V4:
> Rebase.
> 
> V5:
> Fail intel_gvt_save_ggtt as -ENOMEM if fail to alloc memory to save
> ggtt. Free allocated ggtt_entries on failure.
> 
> V6:
> Save host entry to per-vGPU gtt.ggtt_mm on each host_entry update.
> 
> Signed-off-by: Hang Yuan <hang.yuan@linux.intel.com>
> Signed-off-by: Colin Xu <colin.xu@intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/gtt.c      | 75 +++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/gvt/gtt.h      |  9 ++++
>  drivers/gpu/drm/i915/gvt/gvt.c      |  8 +++
>  drivers/gpu/drm/i915/gvt/gvt.h      |  3 ++
>  drivers/gpu/drm/i915/gvt/handlers.c | 39 +++++++++++++--
>  drivers/gpu/drm/i915/gvt/mmio.h     |  3 ++
>  drivers/gpu/drm/i915/intel_gvt.c    | 15 ++++++
>  drivers/gpu/drm/i915/intel_gvt.h    |  5 ++
>  8 files changed, 154 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
> index a3a4305eda01..33385d640cb9 100644
> --- a/drivers/gpu/drm/i915/gvt/gtt.c
> +++ b/drivers/gpu/drm/i915/gvt/gtt.c
> @@ -636,9 +636,20 @@ static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
>  		struct intel_gvt_gtt_entry *entry, unsigned long index)
>  {
>  	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
> +	unsigned long offset = index;
>  
>  	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
>  
> +	if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
> +		offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
> +		mm->ggtt_mm.host_aperture[offset].val64 = entry->val64;
> +		mm->ggtt_mm.host_aperture[offset].dirty = true;
> +	} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
> +		offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
> +		mm->ggtt_mm.host_hidden[offset].val64 = entry->val64;
> +		mm->ggtt_mm.host_hidden[offset].dirty = true;
> +	}

Looks like the 'dirty' flag is not needed at all: what you need is to restore all
present entries, so you can just check the present bit of val64 for that.
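
For illustration, a minimal sketch of that suggestion applied to the restore loop
later in this patch. Treating bit 0 of val64 as the present bit is an assumption
here; the real code would go through the PTE ops or the appropriate present macro:

	/* Sketch only: skip the dirty flag and key off the present bit. */
	for (idx = 0; idx < num_low; idx++) {
		u64 pte = mm->ggtt_mm.host_aperture[idx].val64;

		if (pte & 0x1)	/* assumed: bit 0 == present */
			write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
	}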

> +
>  	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
>  }
>  
> @@ -1928,6 +1939,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
>  {
>  	struct intel_vgpu_mm *mm;
>  	unsigned long nr_entries;
> +	u64 size;
>  
>  	mm = vgpu_alloc_mm(vgpu);
>  	if (!mm)
> @@ -1944,6 +1956,25 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
>  		return ERR_PTR(-ENOMEM);
>  	}
>  
> +	size = (vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) *
> +		sizeof(struct intel_vgpu_host_ggtt);
> +	mm->ggtt_mm.host_aperture = vzalloc(size);

I think normally the required size calculation is just written in the alloc
parameter instead of in an extra variable.
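
For example, the aperture allocation above could fold the calculation into the
call itself (sketch only, reusing the identifiers from the patch):

	mm->ggtt_mm.host_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) *
					    sizeof(struct intel_vgpu_host_ggtt));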

> +	if (!mm->ggtt_mm.host_aperture) {
> +		vfree(mm->ggtt_mm.virtual_ggtt);
> +		vgpu_free_mm(mm);
> +		return ERR_PTR(-ENOMEM);
> +	}
> +
> +	size = (vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) *
> +		sizeof(struct intel_vgpu_host_ggtt);
> +	mm->ggtt_mm.host_hidden = vzalloc(size);
> +	if (!mm->ggtt_mm.host_hidden) {
> +		vfree(mm->ggtt_mm.host_aperture);
> +		vfree(mm->ggtt_mm.virtual_ggtt);
> +		vgpu_free_mm(mm);
> +		return ERR_PTR(-ENOMEM);
> +	}
> +
>  	return mm;
>  }
>  
> @@ -1971,6 +2002,8 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
>  		invalidate_ppgtt_mm(mm);
>  	} else {
>  		vfree(mm->ggtt_mm.virtual_ggtt);
> +		vfree(mm->ggtt_mm.host_aperture);
> +		vfree(mm->ggtt_mm.host_hidden);
>  	}
>  
>  	vgpu_free_mm(mm);
> @@ -2852,3 +2885,45 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
>  	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
>  	intel_vgpu_reset_ggtt(vgpu, true);
>  }
> +
> +/**
> + * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
> + * @gvt: intel gvt device
> + *
> + * This function is called at driver resume stage to restore
> + * GGTT entries of every vGPU.
> + *
> + */
> +void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
> +{
> +	struct intel_vgpu *vgpu;
> +	struct intel_vgpu_mm *mm;
> +	struct intel_vgpu_host_ggtt *host_ggtt;
> +	int id;
> +	u32 idx, num_low, num_hi, offset;
> +
> +	/* Restore dirty host ggtt for all vGPUs */
> +	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
> +		mm = vgpu->gtt.ggtt_mm;
> +
> +		num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
> +		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
> +		for (idx = 0; idx < num_low; idx++) {
> +			host_ggtt = &mm->ggtt_mm.host_aperture[idx];
> +			if (host_ggtt->dirty) {
> +				write_pte64(vgpu->gvt->gt->ggtt,
> +					    offset + idx, host_ggtt->val64);
> +			}
> +		}
> +
> +		num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
> +		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
> +		for (idx = 0; idx < num_hi; idx++) {
> +			host_ggtt = &mm->ggtt_mm.host_hidden[idx];
> +			if (host_ggtt->dirty) {
> +				write_pte64(vgpu->gvt->gt->ggtt,
> +					    offset + idx, host_ggtt->val64);
> +			}
> +		}
> +	}
> +}
> diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
> index 52d0d88abd86..7ec333a2ead5 100644
> --- a/drivers/gpu/drm/i915/gvt/gtt.h
> +++ b/drivers/gpu/drm/i915/gvt/gtt.h
> @@ -139,6 +139,11 @@ struct intel_gvt_partial_pte {
>  	struct list_head list;
>  };
>  
> +struct intel_vgpu_host_ggtt {
> +	u64 val64;
> +	bool dirty;
> +};
> +
>  struct intel_vgpu_mm {
>  	enum intel_gvt_mm_type type;
>  	struct intel_vgpu *vgpu;
> @@ -164,6 +169,9 @@ struct intel_vgpu_mm {
>  		} ppgtt_mm;
>  		struct {
>  			void *virtual_ggtt;
> +			/* Save/restore for PM */
> +			struct intel_vgpu_host_ggtt *host_aperture;
> +			struct intel_vgpu_host_ggtt *host_hidden;
>  			struct list_head partial_pte_list;
>  		} ggtt_mm;
>  	};
> @@ -280,5 +288,6 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
>  	unsigned int off, void *p_data, unsigned int bytes);
>  
>  void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
> +void intel_gvt_restore_ggtt(struct intel_gvt *gvt);
>  
>  #endif /* _GVT_GTT_H_ */
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> index c7c561237883..d0e9c43b031b 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.c
> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> @@ -405,6 +405,14 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
>  	return ret;
>  }
>  
> +int
> +intel_gvt_pm_resume(struct intel_gvt *gvt)
> +{
> +	intel_gvt_restore_regs(gvt);
> +	intel_gvt_restore_ggtt(gvt);
> +	return 0;

Please split out the restore_regs part, which seems to be separate.

> +}
> +
>  int
>  intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
>  {
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> index a81cf0f01e78..b3d6355dd797 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.h
> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> @@ -255,6 +255,8 @@ struct intel_gvt_mmio {
>  #define F_CMD_ACCESS	(1 << 3)
>  /* This reg has been accessed by a VM */
>  #define F_ACCESSED	(1 << 4)
> +/* This reg requires save & restore during host PM suspend/resume */
> +#define F_PM_SAVE	(1 << 5)
>  /* This reg could be accessed by unaligned address */
>  #define F_UNALIGN	(1 << 6)
>  /* This reg is in GVT's mmio save-restor list and in hardware
> @@ -685,6 +687,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
>  void intel_gvt_debugfs_init(struct intel_gvt *gvt);
>  void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
>  
> +int intel_gvt_pm_resume(struct intel_gvt *gvt);
>  
>  #include "trace.h"
>  #include "mpt.h"
> diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
> index 89353b97589a..1f2432d7df34 100644
> --- a/drivers/gpu/drm/i915/gvt/handlers.c
> +++ b/drivers/gpu/drm/i915/gvt/handlers.c
> @@ -3120,9 +3120,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
>  	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
>  	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
>  	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
> -	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
> -		NULL, gen9_trtte_write);
> -	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
> +	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
> +		 NULL, gen9_trtte_write);
> +	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
> +		 NULL, gen9_trtt_chicken_write);
>  
>  	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
>  
> @@ -3661,3 +3662,35 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
>  		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
>  		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
>  }
> +
> +static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
> +					  u32 offset, void *data)
> +{
> +	struct intel_vgpu *vgpu = data;
> +	struct drm_i915_private *dev_priv = gvt->gt->i915;
> +
> +	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
> +		I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
> +
> +	return 0;
> +}
> +
> +void intel_gvt_restore_regs(struct intel_gvt *gvt)
> +{
> +	struct intel_vgpu *vgpu;
> +	int i, id;
> +	u64 val;
> +
> +	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
> +		mmio_hw_access_pre(gvt->gt);
> +		for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
> +			val = vgpu_vreg64(vgpu, fence_num_to_offset(i));
> +			intel_vgpu_write_fence(vgpu, i, val);
> +		}
> +
> +		intel_gvt_for_each_tracked_mmio(gvt,
> +						mmio_pm_restore_handler, vgpu);
> +
> +		mmio_hw_access_post(gvt->gt);
> +	}
> +}
> diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
> index cc4812648bf4..999d9dda0614 100644
> --- a/drivers/gpu/drm/i915/gvt/mmio.h
> +++ b/drivers/gpu/drm/i915/gvt/mmio.h
> @@ -104,4 +104,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
>  
>  int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
>  				  void *p_data, unsigned int bytes);
> +
> +void intel_gvt_restore_regs(struct intel_gvt *gvt);
> +
>  #endif
> diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
> index 99fe8aef1c67..4e70c1a9ef2e 100644
> --- a/drivers/gpu/drm/i915/intel_gvt.c
> +++ b/drivers/gpu/drm/i915/intel_gvt.c
> @@ -24,6 +24,7 @@
>  #include "i915_drv.h"
>  #include "i915_vgpu.h"
>  #include "intel_gvt.h"
> +#include "gvt/gvt.h"
>  
>  /**
>   * DOC: Intel GVT-g host support
> @@ -147,3 +148,17 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
>  
>  	intel_gvt_clean_device(dev_priv);
>  }
> +
> +/**
> + * intel_gvt_resume - GVT resume routine wapper
> + *
> + * @dev_priv: drm i915 private *
> + *
> + * This function is called at the i915 driver resume stage to restore required
> + * HW status for GVT so that vGPU can continue running after resumed.
> + */
> +void intel_gvt_resume(struct drm_i915_private *dev_priv)
> +{
> +	if (intel_gvt_active(dev_priv))
> +		intel_gvt_pm_resume(dev_priv->gvt);
> +}
> diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
> index 502fad8a8652..d7d3fb6186fd 100644
> --- a/drivers/gpu/drm/i915/intel_gvt.h
> +++ b/drivers/gpu/drm/i915/intel_gvt.h
> @@ -33,6 +33,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv);
>  void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
>  int intel_gvt_init_host(void);
>  void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
> +void intel_gvt_resume(struct drm_i915_private *dev_priv);
>  #else
>  static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
>  {
> @@ -46,6 +47,10 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
>  static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
>  {
>  }
> +
> +static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
> +{
> +}
>  #endif
>  
>  #endif /* _INTEL_GVT_H_ */
> -- 
> 2.29.0
> 
> _______________________________________________
> intel-gvt-dev mailing list
> intel-gvt-dev@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev
Colin Xu Oct. 27, 2020, 12:42 a.m. UTC | #2
On 2020-10-26 17:19, Zhenyu Wang wrote:
> On 2020.10.23 16:17:19 +0800, Colin Xu wrote:
>> This patch save/restore necessary GVT info during i915 suspend/resume so
>> that GVT enabled QEMU VM can continue running.
>>
>> Only GGTT and fence regs are saved/restored now. GVT will save GGTT
>> entries on each host_entry update, restore the saved dirty entries
>> and re-init fence regs in resume routine.
>>
>> V2:
>> - Change kzalloc/kfree to vzalloc/vfree since the space allocated
>> from kmalloc may not enough for all saved GGTT entries.
>> - Keep gvt suspend/resume wrapper in intel_gvt.h/intel_gvt.c and
>> move the actual implementation to gvt.h/gvt.c. (zhenyu)
>> - Check gvt config on and active with intel_gvt_active(). (zhenyu)
>>
>> V3: (zhenyu)
>> - Incorrect copy length. Should be num entries * entry size.
>> - Use memcpy_toio()/memcpy_fromio() instead of memcpy for iomem.
>> - Add F_PM_SAVE flags to indicate which MMIOs to save/restore for PM.
>>
>> V4:
>> Rebase.
>>
>> V5:
>> Fail intel_gvt_save_ggtt as -ENOMEM if fail to alloc memory to save
>> ggtt. Free allocated ggtt_entries on failure.
>>
>> V6:
>> Save host entry to per-vGPU gtt.ggtt_mm on each host_entry update.
>>
>> Signed-off-by: Hang Yuan <hang.yuan@linux.intel.com>
>> Signed-off-by: Colin Xu <colin.xu@intel.com>
>> ---
>>   drivers/gpu/drm/i915/gvt/gtt.c      | 75 +++++++++++++++++++++++++++++
>>   drivers/gpu/drm/i915/gvt/gtt.h      |  9 ++++
>>   drivers/gpu/drm/i915/gvt/gvt.c      |  8 +++
>>   drivers/gpu/drm/i915/gvt/gvt.h      |  3 ++
>>   drivers/gpu/drm/i915/gvt/handlers.c | 39 +++++++++++++--
>>   drivers/gpu/drm/i915/gvt/mmio.h     |  3 ++
>>   drivers/gpu/drm/i915/intel_gvt.c    | 15 ++++++
>>   drivers/gpu/drm/i915/intel_gvt.h    |  5 ++
>>   8 files changed, 154 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
>> index a3a4305eda01..33385d640cb9 100644
>> --- a/drivers/gpu/drm/i915/gvt/gtt.c
>> +++ b/drivers/gpu/drm/i915/gvt/gtt.c
>> @@ -636,9 +636,20 @@ static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
>>   		struct intel_gvt_gtt_entry *entry, unsigned long index)
>>   {
>>   	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
>> +	unsigned long offset = index;
>>   
>>   	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
>>   
>> +	if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
>> +		offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
>> +		mm->ggtt_mm.host_aperture[offset].val64 = entry->val64;
>> +		mm->ggtt_mm.host_aperture[offset].dirty = true;
>> +	} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
>> +		offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
>> +		mm->ggtt_mm.host_hidden[offset].val64 = entry->val64;
>> +		mm->ggtt_mm.host_hidden[offset].dirty = true;
>> +	}
> Looks 'dirty' flag is not needed at all, as what you need is to restore all present
> entries, so can just check present bit of val64 for that.

Strictly speaking they are different. The host could update a page that isn't
present while the other PTE fields are still valid; such an entry is "present".
The purpose of 'dirty' is to record whether the host has updated this entry at
all, regardless of page presence. If we only check the present bit, there is a
case where a previous PTE points to a region that shouldn't be walked by the
new PDE: the new PDE/PTE update sets the PDE with present but leaves the PTE
without present. That could leak information through the new PDE plus the old
PTE. The hypervisor should follow guest behavior and update all PDE/PTEs, and
the guest should make sure a partial update won't leak information. If the
hypervisor doesn't follow the guest page updates, information could be leaked.

And actually, when a vGPU is created, all GGTT pages are updated, so in
practice all entries are dirty. So in real practice, it's true that the
dirty check can be removed.

>
>> +
>>   	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
>>   }
>>   
>> @@ -1928,6 +1939,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
>>   {
>>   	struct intel_vgpu_mm *mm;
>>   	unsigned long nr_entries;
>> +	u64 size;
>>   
>>   	mm = vgpu_alloc_mm(vgpu);
>>   	if (!mm)
>> @@ -1944,6 +1956,25 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
>>   		return ERR_PTR(-ENOMEM);
>>   	}
>>   
>> +	size = (vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) *
>> +		sizeof(struct intel_vgpu_host_ggtt);
>> +	mm->ggtt_mm.host_aperture = vzalloc(size);
> I think normally just write required size calculation in alloc parameter
> instead of in extra variable.
That was only to trim the line length to within 80 columns; otherwise aligning
to the parentheses doesn't look clean. Since the kernel has 80 columns as the
'strongly preferred limit', if it's OK I'll put it on the same line so no extra
variable is required.
>
>> +	if (!mm->ggtt_mm.host_aperture) {
>> +		vfree(mm->ggtt_mm.virtual_ggtt);
>> +		vgpu_free_mm(mm);
>> +		return ERR_PTR(-ENOMEM);
>> +	}
>> +
>> +	size = (vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) *
>> +		sizeof(struct intel_vgpu_host_ggtt);
>> +	mm->ggtt_mm.host_hidden = vzalloc(size);
>> +	if (!mm->ggtt_mm.host_hidden) {
>> +		vfree(mm->ggtt_mm.host_aperture);
>> +		vfree(mm->ggtt_mm.virtual_ggtt);
>> +		vgpu_free_mm(mm);
>> +		return ERR_PTR(-ENOMEM);
>> +	}
>> +
>>   	return mm;
>>   }
>>   
>> @@ -1971,6 +2002,8 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
>>   		invalidate_ppgtt_mm(mm);
>>   	} else {
>>   		vfree(mm->ggtt_mm.virtual_ggtt);
>> +		vfree(mm->ggtt_mm.host_aperture);
>> +		vfree(mm->ggtt_mm.host_hidden);
>>   	}
>>   
>>   	vgpu_free_mm(mm);
>> @@ -2852,3 +2885,45 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
>>   	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
>>   	intel_vgpu_reset_ggtt(vgpu, true);
>>   }
>> +
>> +/**
>> + * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
>> + * @gvt: intel gvt device
>> + *
>> + * This function is called at driver resume stage to restore
>> + * GGTT entries of every vGPU.
>> + *
>> + */
>> +void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
>> +{
>> +	struct intel_vgpu *vgpu;
>> +	struct intel_vgpu_mm *mm;
>> +	struct intel_vgpu_host_ggtt *host_ggtt;
>> +	int id;
>> +	u32 idx, num_low, num_hi, offset;
>> +
>> +	/* Restore dirty host ggtt for all vGPUs */
>> +	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
>> +		mm = vgpu->gtt.ggtt_mm;
>> +
>> +		num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
>> +		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
>> +		for (idx = 0; idx < num_low; idx++) {
>> +			host_ggtt = &mm->ggtt_mm.host_aperture[idx];
>> +			if (host_ggtt->dirty) {
>> +				write_pte64(vgpu->gvt->gt->ggtt,
>> +					    offset + idx, host_ggtt->val64);
>> +			}
>> +		}
>> +
>> +		num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
>> +		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
>> +		for (idx = 0; idx < num_hi; idx++) {
>> +			host_ggtt = &mm->ggtt_mm.host_hidden[idx];
>> +			if (host_ggtt->dirty) {
>> +				write_pte64(vgpu->gvt->gt->ggtt,
>> +					    offset + idx, host_ggtt->val64);
>> +			}
>> +		}
>> +	}
>> +}
>> diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
>> index 52d0d88abd86..7ec333a2ead5 100644
>> --- a/drivers/gpu/drm/i915/gvt/gtt.h
>> +++ b/drivers/gpu/drm/i915/gvt/gtt.h
>> @@ -139,6 +139,11 @@ struct intel_gvt_partial_pte {
>>   	struct list_head list;
>>   };
>>   
>> +struct intel_vgpu_host_ggtt {
>> +	u64 val64;
>> +	bool dirty;
>> +};
>> +
>>   struct intel_vgpu_mm {
>>   	enum intel_gvt_mm_type type;
>>   	struct intel_vgpu *vgpu;
>> @@ -164,6 +169,9 @@ struct intel_vgpu_mm {
>>   		} ppgtt_mm;
>>   		struct {
>>   			void *virtual_ggtt;
>> +			/* Save/restore for PM */
>> +			struct intel_vgpu_host_ggtt *host_aperture;
>> +			struct intel_vgpu_host_ggtt *host_hidden;
>>   			struct list_head partial_pte_list;
>>   		} ggtt_mm;
>>   	};
>> @@ -280,5 +288,6 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
>>   	unsigned int off, void *p_data, unsigned int bytes);
>>   
>>   void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
>> +void intel_gvt_restore_ggtt(struct intel_gvt *gvt);
>>   
>>   #endif /* _GVT_GTT_H_ */
>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
>> index c7c561237883..d0e9c43b031b 100644
>> --- a/drivers/gpu/drm/i915/gvt/gvt.c
>> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
>> @@ -405,6 +405,14 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
>>   	return ret;
>>   }
>>   
>> +int
>> +intel_gvt_pm_resume(struct intel_gvt *gvt)
>> +{
>> +	intel_gvt_restore_regs(gvt);
>> +	intel_gvt_restore_ggtt(gvt);
>> +	return 0;
> Please split restore_regs part which seems to be separate.
Split intel_gvt_restore_regs() further into fence regs and the normal tracked
MMIOs with F_PM_SAVE?
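
One possible shape of that split, as a sketch only — the helper names
intel_gvt_restore_fence() and intel_gvt_restore_mmio() are illustrative, not
part of this patch:

	int
	intel_gvt_pm_resume(struct intel_gvt *gvt)
	{
		intel_gvt_restore_fence(gvt);	/* fence regs from vreg */
		intel_gvt_restore_mmio(gvt);	/* tracked MMIOs with F_PM_SAVE */
		intel_gvt_restore_ggtt(gvt);
		return 0;
	}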
>
>> +}
>> +
>>   int
>>   intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
>>   {
>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
>> index a81cf0f01e78..b3d6355dd797 100644
>> --- a/drivers/gpu/drm/i915/gvt/gvt.h
>> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
>> @@ -255,6 +255,8 @@ struct intel_gvt_mmio {
>>   #define F_CMD_ACCESS	(1 << 3)
>>   /* This reg has been accessed by a VM */
>>   #define F_ACCESSED	(1 << 4)
>> +/* This reg requires save & restore during host PM suspend/resume */
>> +#define F_PM_SAVE	(1 << 5)
>>   /* This reg could be accessed by unaligned address */
>>   #define F_UNALIGN	(1 << 6)
>>   /* This reg is in GVT's mmio save-restor list and in hardware
>> @@ -685,6 +687,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
>>   void intel_gvt_debugfs_init(struct intel_gvt *gvt);
>>   void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
>>   
>> +int intel_gvt_pm_resume(struct intel_gvt *gvt);
>>   
>>   #include "trace.h"
>>   #include "mpt.h"
>> diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
>> index 89353b97589a..1f2432d7df34 100644
>> --- a/drivers/gpu/drm/i915/gvt/handlers.c
>> +++ b/drivers/gpu/drm/i915/gvt/handlers.c
>> @@ -3120,9 +3120,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
>>   	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
>>   	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
>>   	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
>> -	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
>> -		NULL, gen9_trtte_write);
>> -	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
>> +	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
>> +		 NULL, gen9_trtte_write);
>> +	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
>> +		 NULL, gen9_trtt_chicken_write);
>>   
>>   	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
>>   
>> @@ -3661,3 +3662,35 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
>>   		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
>>   		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
>>   }
>> +
>> +static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
>> +					  u32 offset, void *data)
>> +{
>> +	struct intel_vgpu *vgpu = data;
>> +	struct drm_i915_private *dev_priv = gvt->gt->i915;
>> +
>> +	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
>> +		I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
>> +
>> +	return 0;
>> +}
>> +
>> +void intel_gvt_restore_regs(struct intel_gvt *gvt)
>> +{
>> +	struct intel_vgpu *vgpu;
>> +	int i, id;
>> +	u64 val;
>> +
>> +	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
>> +		mmio_hw_access_pre(gvt->gt);
>> +		for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
>> +			val = vgpu_vreg64(vgpu, fence_num_to_offset(i));
>> +			intel_vgpu_write_fence(vgpu, i, val);
>> +		}
>> +
>> +		intel_gvt_for_each_tracked_mmio(gvt,
>> +						mmio_pm_restore_handler, vgpu);
>> +
>> +		mmio_hw_access_post(gvt->gt);
>> +	}
>> +}
>> diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
>> index cc4812648bf4..999d9dda0614 100644
>> --- a/drivers/gpu/drm/i915/gvt/mmio.h
>> +++ b/drivers/gpu/drm/i915/gvt/mmio.h
>> @@ -104,4 +104,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
>>   
>>   int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
>>   				  void *p_data, unsigned int bytes);
>> +
>> +void intel_gvt_restore_regs(struct intel_gvt *gvt);
>> +
>>   #endif
>> diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
>> index 99fe8aef1c67..4e70c1a9ef2e 100644
>> --- a/drivers/gpu/drm/i915/intel_gvt.c
>> +++ b/drivers/gpu/drm/i915/intel_gvt.c
>> @@ -24,6 +24,7 @@
>>   #include "i915_drv.h"
>>   #include "i915_vgpu.h"
>>   #include "intel_gvt.h"
>> +#include "gvt/gvt.h"
>>   
>>   /**
>>    * DOC: Intel GVT-g host support
>> @@ -147,3 +148,17 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
>>   
>>   	intel_gvt_clean_device(dev_priv);
>>   }
>> +
>> +/**
>> + * intel_gvt_resume - GVT resume routine wapper
>> + *
>> + * @dev_priv: drm i915 private *
>> + *
>> + * This function is called at the i915 driver resume stage to restore required
>> + * HW status for GVT so that vGPU can continue running after resumed.
>> + */
>> +void intel_gvt_resume(struct drm_i915_private *dev_priv)
>> +{
>> +	if (intel_gvt_active(dev_priv))
>> +		intel_gvt_pm_resume(dev_priv->gvt);
>> +}
>> diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
>> index 502fad8a8652..d7d3fb6186fd 100644
>> --- a/drivers/gpu/drm/i915/intel_gvt.h
>> +++ b/drivers/gpu/drm/i915/intel_gvt.h
>> @@ -33,6 +33,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv);
>>   void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
>>   int intel_gvt_init_host(void);
>>   void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
>> +void intel_gvt_resume(struct drm_i915_private *dev_priv);
>>   #else
>>   static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
>>   {
>> @@ -46,6 +47,10 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
>>   static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
>>   {
>>   }
>> +
>> +static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
>> +{
>> +}
>>   #endif
>>   
>>   #endif /* _INTEL_GVT_H_ */
>> -- 
>> 2.29.0
>>
>> _______________________________________________
>> intel-gvt-dev mailing list
>> intel-gvt-dev@lists.freedesktop.org
>> https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev
Zhenyu Wang Oct. 27, 2020, 1:49 a.m. UTC | #3
On 2020.10.27 08:42:40 +0800, Colin Xu wrote:
> 
> On 2020-10-26 17:19, Zhenyu Wang wrote:
> > On 2020.10.23 16:17:19 +0800, Colin Xu wrote:
> > > This patch save/restore necessary GVT info during i915 suspend/resume so
> > > that GVT enabled QEMU VM can continue running.
> > > 
> > > Only GGTT and fence regs are saved/restored now. GVT will save GGTT
> > > entries on each host_entry update, restore the saved dirty entries
> > > and re-init fence regs in resume routine.
> > > 
> > > V2:
> > > - Change kzalloc/kfree to vzalloc/vfree since the space allocated
> > > from kmalloc may not enough for all saved GGTT entries.
> > > - Keep gvt suspend/resume wrapper in intel_gvt.h/intel_gvt.c and
> > > move the actual implementation to gvt.h/gvt.c. (zhenyu)
> > > - Check gvt config on and active with intel_gvt_active(). (zhenyu)
> > > 
> > > V3: (zhenyu)
> > > - Incorrect copy length. Should be num entries * entry size.
> > > - Use memcpy_toio()/memcpy_fromio() instead of memcpy for iomem.
> > > - Add F_PM_SAVE flags to indicate which MMIOs to save/restore for PM.
> > > 
> > > V4:
> > > Rebase.
> > > 
> > > V5:
> > > Fail intel_gvt_save_ggtt as -ENOMEM if fail to alloc memory to save
> > > ggtt. Free allocated ggtt_entries on failure.
> > > 
> > > V6:
> > > Save host entry to per-vGPU gtt.ggtt_mm on each host_entry update.
> > > 
> > > Signed-off-by: Hang Yuan <hang.yuan@linux.intel.com>
> > > Signed-off-by: Colin Xu <colin.xu@intel.com>
> > > ---
> > >   drivers/gpu/drm/i915/gvt/gtt.c      | 75 +++++++++++++++++++++++++++++
> > >   drivers/gpu/drm/i915/gvt/gtt.h      |  9 ++++
> > >   drivers/gpu/drm/i915/gvt/gvt.c      |  8 +++
> > >   drivers/gpu/drm/i915/gvt/gvt.h      |  3 ++
> > >   drivers/gpu/drm/i915/gvt/handlers.c | 39 +++++++++++++--
> > >   drivers/gpu/drm/i915/gvt/mmio.h     |  3 ++
> > >   drivers/gpu/drm/i915/intel_gvt.c    | 15 ++++++
> > >   drivers/gpu/drm/i915/intel_gvt.h    |  5 ++
> > >   8 files changed, 154 insertions(+), 3 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
> > > index a3a4305eda01..33385d640cb9 100644
> > > --- a/drivers/gpu/drm/i915/gvt/gtt.c
> > > +++ b/drivers/gpu/drm/i915/gvt/gtt.c
> > > @@ -636,9 +636,20 @@ static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
> > >   		struct intel_gvt_gtt_entry *entry, unsigned long index)
> > >   {
> > >   	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
> > > +	unsigned long offset = index;
> > >   	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
> > > +	if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
> > > +		offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
> > > +		mm->ggtt_mm.host_aperture[offset].val64 = entry->val64;
> > > +		mm->ggtt_mm.host_aperture[offset].dirty = true;
> > > +	} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
> > > +		offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
> > > +		mm->ggtt_mm.host_hidden[offset].val64 = entry->val64;
> > > +		mm->ggtt_mm.host_hidden[offset].dirty = true;
> > > +	}
> > Looks 'dirty' flag is not needed at all, as what you need is to restore all present
> > entries, so can just check present bit of val64 for that.
> 
> Strictly it's different. Host could update a page which isn't present but
> other PTE fields are still valid, this is "present". For dirty it's purpose
> is to record whether or not host has update this entry, regardless the page
> presence. If only check the present bit, there will be a case that previous
> PTE points to region that shouldn't be walked by the new PDE, but new
> PDE/PTE update only updates PDE w/ present, but PTE w/o present. That could
> leak information by new PDE + old PTE. Hypervisor should follow guest
> behavior to update all PDE/PTEs, and guest should make sure partial update
> won't leak information. If hypervisor doesn't follow guest page update,
> information could be leaked.

But this is only for GGTT, which is just a single-level table, right?

> 
> And actually, when vGPU created, all GGTT pages are updates so in practice,
> all entries are dirty. So from real practice, it's true that dirty check can
> be removed.

You mean the guest would normally update all entries? That's OK, we just follow
the guest's behavior. We've also seen guest partial updates of a GGTT entry,
which could be the state at the suspend/resume point; we reset the present bit
at that time, so such entries can be skipped in this case.

> 
> > 
> > > +
> > >   	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
> > >   }
> > > @@ -1928,6 +1939,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
> > >   {
> > >   	struct intel_vgpu_mm *mm;
> > >   	unsigned long nr_entries;
> > > +	u64 size;
> > >   	mm = vgpu_alloc_mm(vgpu);
> > >   	if (!mm)
> > > @@ -1944,6 +1956,25 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
> > >   		return ERR_PTR(-ENOMEM);
> > >   	}
> > > +	size = (vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) *
> > > +		sizeof(struct intel_vgpu_host_ggtt);
> > > +	mm->ggtt_mm.host_aperture = vzalloc(size);
> > I think normally just write required size calculation in alloc parameter
> > instead of in extra variable.
> Only for trim line length within 80 otherwise align to parentheses doesn't
> look clean. Since kernel has 80 columns as the 'strong preferred limit', if
> it's OK I'll put it into same line then no extra variable is required.
> >

No, that strong word is deprecated. ;)

> > > +	if (!mm->ggtt_mm.host_aperture) {
> > > +		vfree(mm->ggtt_mm.virtual_ggtt);
> > > +		vgpu_free_mm(mm);
> > > +		return ERR_PTR(-ENOMEM);
> > > +	}
> > > +
> > > +	size = (vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) *
> > > +		sizeof(struct intel_vgpu_host_ggtt);
> > > +	mm->ggtt_mm.host_hidden = vzalloc(size);
> > > +	if (!mm->ggtt_mm.host_hidden) {
> > > +		vfree(mm->ggtt_mm.host_aperture);
> > > +		vfree(mm->ggtt_mm.virtual_ggtt);
> > > +		vgpu_free_mm(mm);
> > > +		return ERR_PTR(-ENOMEM);
> > > +	}
> > > +
> > >   	return mm;
> > >   }
> > > @@ -1971,6 +2002,8 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
> > >   		invalidate_ppgtt_mm(mm);
> > >   	} else {
> > >   		vfree(mm->ggtt_mm.virtual_ggtt);
> > > +		vfree(mm->ggtt_mm.host_aperture);
> > > +		vfree(mm->ggtt_mm.host_hidden);
> > >   	}
> > >   	vgpu_free_mm(mm);
> > > @@ -2852,3 +2885,45 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
> > >   	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
> > >   	intel_vgpu_reset_ggtt(vgpu, true);
> > >   }
> > > +
> > > +/**
> > > + * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
> > > + * @gvt: intel gvt device
> > > + *
> > > + * This function is called at driver resume stage to restore
> > > + * GGTT entries of every vGPU.
> > > + *
> > > + */
> > > +void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
> > > +{
> > > +	struct intel_vgpu *vgpu;
> > > +	struct intel_vgpu_mm *mm;
> > > +	struct intel_vgpu_host_ggtt *host_ggtt;
> > > +	int id;
> > > +	u32 idx, num_low, num_hi, offset;
> > > +
> > > +	/* Restore dirty host ggtt for all vGPUs */
> > > +	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
> > > +		mm = vgpu->gtt.ggtt_mm;
> > > +
> > > +		num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
> > > +		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
> > > +		for (idx = 0; idx < num_low; idx++) {
> > > +			host_ggtt = &mm->ggtt_mm.host_aperture[idx];
> > > +			if (host_ggtt->dirty) {
> > > +				write_pte64(vgpu->gvt->gt->ggtt,
> > > +					    offset + idx, host_ggtt->val64);
> > > +			}
> > > +		}
> > > +
> > > +		num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
> > > +		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
> > > +		for (idx = 0; idx < num_hi; idx++) {
> > > +			host_ggtt = &mm->ggtt_mm.host_hidden[idx];
> > > +			if (host_ggtt->dirty) {
> > > +				write_pte64(vgpu->gvt->gt->ggtt,
> > > +					    offset + idx, host_ggtt->val64);
> > > +			}
> > > +		}
> > > +	}
> > > +}
> > > diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
> > > index 52d0d88abd86..7ec333a2ead5 100644
> > > --- a/drivers/gpu/drm/i915/gvt/gtt.h
> > > +++ b/drivers/gpu/drm/i915/gvt/gtt.h
> > > @@ -139,6 +139,11 @@ struct intel_gvt_partial_pte {
> > >   	struct list_head list;
> > >   };
> > > +struct intel_vgpu_host_ggtt {
> > > +	u64 val64;
> > > +	bool dirty;
> > > +};
> > > +
> > >   struct intel_vgpu_mm {
> > >   	enum intel_gvt_mm_type type;
> > >   	struct intel_vgpu *vgpu;
> > > @@ -164,6 +169,9 @@ struct intel_vgpu_mm {
> > >   		} ppgtt_mm;
> > >   		struct {
> > >   			void *virtual_ggtt;
> > > +			/* Save/restore for PM */
> > > +			struct intel_vgpu_host_ggtt *host_aperture;
> > > +			struct intel_vgpu_host_ggtt *host_hidden;
> > >   			struct list_head partial_pte_list;
> > >   		} ggtt_mm;
> > >   	};
> > > @@ -280,5 +288,6 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
> > >   	unsigned int off, void *p_data, unsigned int bytes);
> > >   void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
> > > +void intel_gvt_restore_ggtt(struct intel_gvt *gvt);
> > >   #endif /* _GVT_GTT_H_ */
> > > diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> > > index c7c561237883..d0e9c43b031b 100644
> > > --- a/drivers/gpu/drm/i915/gvt/gvt.c
> > > +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> > > @@ -405,6 +405,14 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
> > >   	return ret;
> > >   }
> > > +int
> > > +intel_gvt_pm_resume(struct intel_gvt *gvt)
> > > +{
> > > +	intel_gvt_restore_regs(gvt);
> > > +	intel_gvt_restore_ggtt(gvt);
> > > +	return 0;
> > Please split restore_regs part which seems to be separate.
> Split intel_gvt_restore_regs() further to fence regs and normal tracked
> mmios with F_PM_SAVE?
> >

Yep, those are for the regs restore handling.

Thanks

> > > +}
> > > +
> > >   int
> > >   intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
> > >   {
> > > diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> > > index a81cf0f01e78..b3d6355dd797 100644
> > > --- a/drivers/gpu/drm/i915/gvt/gvt.h
> > > +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> > > @@ -255,6 +255,8 @@ struct intel_gvt_mmio {
> > >   #define F_CMD_ACCESS	(1 << 3)
> > >   /* This reg has been accessed by a VM */
> > >   #define F_ACCESSED	(1 << 4)
> > > +/* This reg requires save & restore during host PM suspend/resume */
> > > +#define F_PM_SAVE	(1 << 5)
> > >   /* This reg could be accessed by unaligned address */
> > >   #define F_UNALIGN	(1 << 6)
> > >   /* This reg is in GVT's mmio save-restor list and in hardware
> > > @@ -685,6 +687,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
> > >   void intel_gvt_debugfs_init(struct intel_gvt *gvt);
> > >   void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
> > > +int intel_gvt_pm_resume(struct intel_gvt *gvt);
> > >   #include "trace.h"
> > >   #include "mpt.h"
> > > diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
> > > index 89353b97589a..1f2432d7df34 100644
> > > --- a/drivers/gpu/drm/i915/gvt/handlers.c
> > > +++ b/drivers/gpu/drm/i915/gvt/handlers.c
> > > @@ -3120,9 +3120,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
> > >   	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
> > >   	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
> > >   	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
> > > -	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
> > > -		NULL, gen9_trtte_write);
> > > -	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
> > > +	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
> > > +		 NULL, gen9_trtte_write);
> > > +	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
> > > +		 NULL, gen9_trtt_chicken_write);
> > >   	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
> > > @@ -3661,3 +3662,35 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
> > >   		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
> > >   		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
> > >   }
> > > +
> > > +static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
> > > +					  u32 offset, void *data)
> > > +{
> > > +	struct intel_vgpu *vgpu = data;
> > > +	struct drm_i915_private *dev_priv = gvt->gt->i915;
> > > +
> > > +	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
> > > +		I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
> > > +
> > > +	return 0;
> > > +}
> > > +
> > > +void intel_gvt_restore_regs(struct intel_gvt *gvt)
> > > +{
> > > +	struct intel_vgpu *vgpu;
> > > +	int i, id;
> > > +	u64 val;
> > > +
> > > +	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
> > > +		mmio_hw_access_pre(gvt->gt);
> > > +		for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
> > > +			val = vgpu_vreg64(vgpu, fence_num_to_offset(i));
> > > +			intel_vgpu_write_fence(vgpu, i, val);
> > > +		}
> > > +
> > > +		intel_gvt_for_each_tracked_mmio(gvt,
> > > +						mmio_pm_restore_handler, vgpu);
> > > +
> > > +		mmio_hw_access_post(gvt->gt);
> > > +	}
> > > +}
> > > diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
> > > index cc4812648bf4..999d9dda0614 100644
> > > --- a/drivers/gpu/drm/i915/gvt/mmio.h
> > > +++ b/drivers/gpu/drm/i915/gvt/mmio.h
> > > @@ -104,4 +104,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
> > >   int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
> > >   				  void *p_data, unsigned int bytes);
> > > +
> > > +void intel_gvt_restore_regs(struct intel_gvt *gvt);
> > > +
> > >   #endif
> > > diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
> > > index 99fe8aef1c67..4e70c1a9ef2e 100644
> > > --- a/drivers/gpu/drm/i915/intel_gvt.c
> > > +++ b/drivers/gpu/drm/i915/intel_gvt.c
> > > @@ -24,6 +24,7 @@
> > >   #include "i915_drv.h"
> > >   #include "i915_vgpu.h"
> > >   #include "intel_gvt.h"
> > > +#include "gvt/gvt.h"
> > >   /**
> > >    * DOC: Intel GVT-g host support
> > > @@ -147,3 +148,17 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
> > >   	intel_gvt_clean_device(dev_priv);
> > >   }
> > > +
> > > +/**
> > > + * intel_gvt_resume - GVT resume routine wapper
> > > + *
> > > + * @dev_priv: drm i915 private *
> > > + *
> > > + * This function is called at the i915 driver resume stage to restore required
> > > + * HW status for GVT so that vGPU can continue running after resumed.
> > > + */
> > > +void intel_gvt_resume(struct drm_i915_private *dev_priv)
> > > +{
> > > +	if (intel_gvt_active(dev_priv))
> > > +		intel_gvt_pm_resume(dev_priv->gvt);
> > > +}
> > > diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
> > > index 502fad8a8652..d7d3fb6186fd 100644
> > > --- a/drivers/gpu/drm/i915/intel_gvt.h
> > > +++ b/drivers/gpu/drm/i915/intel_gvt.h
> > > @@ -33,6 +33,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv);
> > >   void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
> > >   int intel_gvt_init_host(void);
> > >   void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
> > > +void intel_gvt_resume(struct drm_i915_private *dev_priv);
> > >   #else
> > >   static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
> > >   {
> > > @@ -46,6 +47,10 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
> > >   static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
> > >   {
> > >   }
> > > +
> > > +static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
> > > +{
> > > +}
> > >   #endif
> > >   #endif /* _INTEL_GVT_H_ */
> > > -- 
> > > 2.29.0
> > > 
> > > _______________________________________________
> > > intel-gvt-dev mailing list
> > > intel-gvt-dev@lists.freedesktop.org
> > > https://lists.freedesktop.org/mailman/listinfo/intel-gvt-dev
> 
> -- 
> Best Regards,
> Colin Xu
>
Zhenyu Wang Oct. 27, 2020, 2:02 a.m. UTC | #4
On 2020.10.27 10:16:08 +0800, Colin Xu wrote:
> 
> On 2020-10-27 09:49, Zhenyu Wang wrote:
> > On 2020.10.27 08:42:40 +0800, Colin Xu wrote:
> > > On 2020-10-26 17:19, Zhenyu Wang wrote:
> > > > On 2020.10.23 16:17:19 +0800, Colin Xu wrote:
> > > > > This patch save/restore necessary GVT info during i915 suspend/resume so
> > > > > that GVT enabled QEMU VM can continue running.
> > > > > 
> > > > > Only GGTT and fence regs are saved/restored now. GVT will save GGTT
> > > > > entries on each host_entry update, restore the saved dirty entries
> > > > > and re-init fence regs in resume routine.
> > > > > 
> > > > > V2:
> > > > > - Change kzalloc/kfree to vzalloc/vfree since the space allocated
> > > > > from kmalloc may not enough for all saved GGTT entries.
> > > > > - Keep gvt suspend/resume wrapper in intel_gvt.h/intel_gvt.c and
> > > > > move the actual implementation to gvt.h/gvt.c. (zhenyu)
> > > > > - Check gvt config on and active with intel_gvt_active(). (zhenyu)
> > > > > 
> > > > > V3: (zhenyu)
> > > > > - Incorrect copy length. Should be num entries * entry size.
> > > > > - Use memcpy_toio()/memcpy_fromio() instead of memcpy for iomem.
> > > > > - Add F_PM_SAVE flags to indicate which MMIOs to save/restore for PM.
> > > > > 
> > > > > V4:
> > > > > Rebase.
> > > > > 
> > > > > V5:
> > > > > Fail intel_gvt_save_ggtt as -ENOMEM if fail to alloc memory to save
> > > > > ggtt. Free allocated ggtt_entries on failure.
> > > > > 
> > > > > V6:
> > > > > Save host entry to per-vGPU gtt.ggtt_mm on each host_entry update.
> > > > > 
> > > > > Signed-off-by: Hang Yuan <hang.yuan@linux.intel.com>
> > > > > Signed-off-by: Colin Xu <colin.xu@intel.com>
> > > > > ---
> > > > >    drivers/gpu/drm/i915/gvt/gtt.c      | 75 +++++++++++++++++++++++++++++
> > > > >    drivers/gpu/drm/i915/gvt/gtt.h      |  9 ++++
> > > > >    drivers/gpu/drm/i915/gvt/gvt.c      |  8 +++
> > > > >    drivers/gpu/drm/i915/gvt/gvt.h      |  3 ++
> > > > >    drivers/gpu/drm/i915/gvt/handlers.c | 39 +++++++++++++--
> > > > >    drivers/gpu/drm/i915/gvt/mmio.h     |  3 ++
> > > > >    drivers/gpu/drm/i915/intel_gvt.c    | 15 ++++++
> > > > >    drivers/gpu/drm/i915/intel_gvt.h    |  5 ++
> > > > >    8 files changed, 154 insertions(+), 3 deletions(-)
> > > > > 
> > > > > diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
> > > > > index a3a4305eda01..33385d640cb9 100644
> > > > > --- a/drivers/gpu/drm/i915/gvt/gtt.c
> > > > > +++ b/drivers/gpu/drm/i915/gvt/gtt.c
> > > > > @@ -636,9 +636,20 @@ static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
> > > > >    		struct intel_gvt_gtt_entry *entry, unsigned long index)
> > > > >    {
> > > > >    	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
> > > > > +	unsigned long offset = index;
> > > > >    	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
> > > > > +	if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
> > > > > +		offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
> > > > > +		mm->ggtt_mm.host_aperture[offset].val64 = entry->val64;
> > > > > +		mm->ggtt_mm.host_aperture[offset].dirty = true;
> > > > > +	} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
> > > > > +		offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
> > > > > +		mm->ggtt_mm.host_hidden[offset].val64 = entry->val64;
> > > > > +		mm->ggtt_mm.host_hidden[offset].dirty = true;
> > > > > +	}
> > > > Looks 'dirty' flag is not needed at all, as what you need is to restore all present
> > > > entries, so can just check present bit of val64 for that.
> > > Strictly it's different. Host could update a page which isn't present but
> > > other PTE fields are still valid, this is "present". For dirty it's purpose
> > > is to record whether or not host has update this entry, regardless the page
> > > presence. If only check the present bit, there will be a case that previous
> > > PTE points to region that shouldn't be walked by the new PDE, but new
> > > PDE/PTE update only updates PDE w/ present, but PTE w/o present. That could
> > > leak information by new PDE + old PTE. Hypervisor should follow guest
> > > behavior to update all PDE/PTEs, and guest should make sure partial update
> > > won't leak information. If hypervisor doesn't follow guest page update,
> > > information could be leaked.
> > But this is only for GGTT which is just single level table right?
> > 
> > > And actually, when vGPU created, all GGTT pages are updates so in practice,
> > > all entries are dirty. So from real practice, it's true that dirty check can
> > > be removed.
> > You mean guest would update all entries normally? That's ok which we just follow
> > guest's behavior. And we've also seen guest partial update of GGTT entry which
> > could be point for suspend/resume and we reset present bit at that time so can be
> > skipped in this case.
> Yes guest would update all. So should I check present again, or just
> save/restore all?
> >

I mean we could depend on the present bit, which not only represents the guest
state but also handles the partial update case as well.
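
If the present bit alone carries that state, the per-entry struct could shrink
to a plain u64 array, e.g. (sketch only; the host_ggtt_aperture/host_ggtt_hidden
names are illustrative, not from this patch):

	/* Sketch: raw PTE values only, presence encoded in the value itself. */
	struct {
		void *virtual_ggtt;
		/* Save/restore for PM */
		u64 *host_ggtt_aperture;
		u64 *host_ggtt_hidden;
		struct list_head partial_pte_list;
	} ggtt_mm;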

> > > > > +
> > > > >    	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
> > > > >    }
> > > > > @@ -1928,6 +1939,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
> > > > >    {
> > > > >    	struct intel_vgpu_mm *mm;
> > > > >    	unsigned long nr_entries;
> > > > > +	u64 size;
> > > > >    	mm = vgpu_alloc_mm(vgpu);
> > > > >    	if (!mm)
> > > > > @@ -1944,6 +1956,25 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
> > > > >    		return ERR_PTR(-ENOMEM);
> > > > >    	}
> > > > > +	size = (vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) *
> > > > > +		sizeof(struct intel_vgpu_host_ggtt);
> > > > > +	mm->ggtt_mm.host_aperture = vzalloc(size);
> > > > I think normally just write required size calculation in alloc parameter
> > > > instead of in extra variable.
> > > Only for trim line length within 80 otherwise align to parentheses doesn't
> > > look clean. Since kernel has 80 columns as the 'strong preferred limit', if
> > > it's OK I'll put it into same line then no extra variable is required.
> > No, that strong word is deprecated. ;)
> It makes the things easier.
> > 
> > > > > +	if (!mm->ggtt_mm.host_aperture) {
> > > > > +		vfree(mm->ggtt_mm.virtual_ggtt);
> > > > > +		vgpu_free_mm(mm);
> > > > > +		return ERR_PTR(-ENOMEM);
> > > > > +	}
> > > > > +
> > > > > +	size = (vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) *
> > > > > +		sizeof(struct intel_vgpu_host_ggtt);
> > > > > +	mm->ggtt_mm.host_hidden = vzalloc(size);
> > > > > +	if (!mm->ggtt_mm.host_hidden) {
> > > > > +		vfree(mm->ggtt_mm.host_aperture);
> > > > > +		vfree(mm->ggtt_mm.virtual_ggtt);
> > > > > +		vgpu_free_mm(mm);
> > > > > +		return ERR_PTR(-ENOMEM);
> > > > > +	}
> > > > > +
> > > > >    	return mm;
> > > > >    }
> > > > > @@ -1971,6 +2002,8 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
> > > > >    		invalidate_ppgtt_mm(mm);
> > > > >    	} else {
> > > > >    		vfree(mm->ggtt_mm.virtual_ggtt);
> > > > > +		vfree(mm->ggtt_mm.host_aperture);
> > > > > +		vfree(mm->ggtt_mm.host_hidden);
> > > > >    	}
> > > > >    	vgpu_free_mm(mm);
> > > > > @@ -2852,3 +2885,45 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
> > > > >    	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
> > > > >    	intel_vgpu_reset_ggtt(vgpu, true);
> > > > >    }
> > > > > +
> > > > > +/**
> > > > > + * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
> > > > > + * @gvt: intel gvt device
> > > > > + *
> > > > > + * This function is called at driver resume stage to restore
> > > > > + * GGTT entries of every vGPU.
> > > > > + *
> > > > > + */
> > > > > +void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
> > > > > +{
> > > > > +	struct intel_vgpu *vgpu;
> > > > > +	struct intel_vgpu_mm *mm;
> > > > > +	struct intel_vgpu_host_ggtt *host_ggtt;
> > > > > +	int id;
> > > > > +	u32 idx, num_low, num_hi, offset;
> > > > > +
> > > > > +	/* Restore dirty host ggtt for all vGPUs */
> > > > > +	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
> > > > > +		mm = vgpu->gtt.ggtt_mm;
> > > > > +
> > > > > +		num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
> > > > > +		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
> > > > > +		for (idx = 0; idx < num_low; idx++) {
> > > > > +			host_ggtt = &mm->ggtt_mm.host_aperture[idx];
> > > > > +			if (host_ggtt->dirty) {
> > > > > +				write_pte64(vgpu->gvt->gt->ggtt,
> > > > > +					    offset + idx, host_ggtt->val64);
> > > > > +			}
> > > > > +		}
> > > > > +
> > > > > +		num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
> > > > > +		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
> > > > > +		for (idx = 0; idx < num_hi; idx++) {
> > > > > +			host_ggtt = &mm->ggtt_mm.host_hidden[idx];
> > > > > +			if (host_ggtt->dirty) {
> > > > > +				write_pte64(vgpu->gvt->gt->ggtt,
> > > > > +					    offset + idx, host_ggtt->val64);
> > > > > +			}
> > > > > +		}
> > > > > +	}
> > > > > +}
> > > > > diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
> > > > > index 52d0d88abd86..7ec333a2ead5 100644
> > > > > --- a/drivers/gpu/drm/i915/gvt/gtt.h
> > > > > +++ b/drivers/gpu/drm/i915/gvt/gtt.h
> > > > > @@ -139,6 +139,11 @@ struct intel_gvt_partial_pte {
> > > > >    	struct list_head list;
> > > > >    };
> > > > > +struct intel_vgpu_host_ggtt {
> > > > > +	u64 val64;
> > > > > +	bool dirty;
> > > > > +};
> > > > > +
> > > > >    struct intel_vgpu_mm {
> > > > >    	enum intel_gvt_mm_type type;
> > > > >    	struct intel_vgpu *vgpu;
> > > > > @@ -164,6 +169,9 @@ struct intel_vgpu_mm {
> > > > >    		} ppgtt_mm;
> > > > >    		struct {
> > > > >    			void *virtual_ggtt;
> > > > > +			/* Save/restore for PM */
> > > > > +			struct intel_vgpu_host_ggtt *host_aperture;
> > > > > +			struct intel_vgpu_host_ggtt *host_hidden;
> > > > >    			struct list_head partial_pte_list;
> > > > >    		} ggtt_mm;
> > > > >    	};
> > > > > @@ -280,5 +288,6 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
> > > > >    	unsigned int off, void *p_data, unsigned int bytes);
> > > > >    void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
> > > > > +void intel_gvt_restore_ggtt(struct intel_gvt *gvt);
> > > > >    #endif /* _GVT_GTT_H_ */
> > > > > diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> > > > > index c7c561237883..d0e9c43b031b 100644
> > > > > --- a/drivers/gpu/drm/i915/gvt/gvt.c
> > > > > +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> > > > > @@ -405,6 +405,14 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
> > > > >    	return ret;
> > > > >    }
> > > > > +int
> > > > > +intel_gvt_pm_resume(struct intel_gvt *gvt)
> > > > > +{
> > > > > +	intel_gvt_restore_regs(gvt);
> > > > > +	intel_gvt_restore_ggtt(gvt);
> > > > > +	return 0;
> > > > Please split restore_regs part which seems to be separate.
> > > Split intel_gvt_restore_regs() further to fence regs and normal tracked
> > > mmios with F_PM_SAVE?
> > yep, those for regs restore handling.
> > 
> > Thanks
> > 
> > > > > +}
> > > > > +
> > > > >    int
> > > > >    intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
> > > > >    {
> > > > > diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
> > > > > index a81cf0f01e78..b3d6355dd797 100644
> > > > > --- a/drivers/gpu/drm/i915/gvt/gvt.h
> > > > > +++ b/drivers/gpu/drm/i915/gvt/gvt.h
> > > > > @@ -255,6 +255,8 @@ struct intel_gvt_mmio {
> > > > >    #define F_CMD_ACCESS	(1 << 3)
> > > > >    /* This reg has been accessed by a VM */
> > > > >    #define F_ACCESSED	(1 << 4)
> > > > > +/* This reg requires save & restore during host PM suspend/resume */
> > > > > +#define F_PM_SAVE	(1 << 5)
> > > > >    /* This reg could be accessed by unaligned address */
> > > > >    #define F_UNALIGN	(1 << 6)
> > > > >    /* This reg is in GVT's mmio save-restor list and in hardware
> > > > > @@ -685,6 +687,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
> > > > >    void intel_gvt_debugfs_init(struct intel_gvt *gvt);
> > > > >    void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
> > > > > +int intel_gvt_pm_resume(struct intel_gvt *gvt);
> > > > >    #include "trace.h"
> > > > >    #include "mpt.h"
> > > > > diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
> > > > > index 89353b97589a..1f2432d7df34 100644
> > > > > --- a/drivers/gpu/drm/i915/gvt/handlers.c
> > > > > +++ b/drivers/gpu/drm/i915/gvt/handlers.c
> > > > > @@ -3120,9 +3120,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
> > > > >    	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
> > > > >    	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
> > > > >    	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
> > > > > -	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
> > > > > -		NULL, gen9_trtte_write);
> > > > > -	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
> > > > > +	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
> > > > > +		 NULL, gen9_trtte_write);
> > > > > +	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
> > > > > +		 NULL, gen9_trtt_chicken_write);
> > > > >    	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
> > > > > @@ -3661,3 +3662,35 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
> > > > >    		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
> > > > >    		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
> > > > >    }
> > > > > +
> > > > > +static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
> > > > > +					  u32 offset, void *data)
> > > > > +{
> > > > > +	struct intel_vgpu *vgpu = data;
> > > > > +	struct drm_i915_private *dev_priv = gvt->gt->i915;
> > > > > +
> > > > > +	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
> > > > > +		I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
> > > > > +
> > > > > +	return 0;
> > > > > +}
> > > > > +
> > > > > +void intel_gvt_restore_regs(struct intel_gvt *gvt)
> > > > > +{
> > > > > +	struct intel_vgpu *vgpu;
> > > > > +	int i, id;
> > > > > +	u64 val;
> > > > > +
> > > > > +	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
> > > > > +		mmio_hw_access_pre(gvt->gt);
> > > > > +		for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
> > > > > +			val = vgpu_vreg64(vgpu, fence_num_to_offset(i));
> > > > > +			intel_vgpu_write_fence(vgpu, i, val);
> > > > > +		}
> > > > > +
> > > > > +		intel_gvt_for_each_tracked_mmio(gvt,
> > > > > +						mmio_pm_restore_handler, vgpu);
> > > > > +
> > > > > +		mmio_hw_access_post(gvt->gt);
> > > > > +	}
> > > > > +}
> > > > > diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
> > > > > index cc4812648bf4..999d9dda0614 100644
> > > > > --- a/drivers/gpu/drm/i915/gvt/mmio.h
> > > > > +++ b/drivers/gpu/drm/i915/gvt/mmio.h
> > > > > @@ -104,4 +104,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
> > > > >    int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
> > > > >    				  void *p_data, unsigned int bytes);
> > > > > +
> > > > > +void intel_gvt_restore_regs(struct intel_gvt *gvt);
> > > > > +
> > > > >    #endif
> > > > > diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
> > > > > index 99fe8aef1c67..4e70c1a9ef2e 100644
> > > > > --- a/drivers/gpu/drm/i915/intel_gvt.c
> > > > > +++ b/drivers/gpu/drm/i915/intel_gvt.c
> > > > > @@ -24,6 +24,7 @@
> > > > >    #include "i915_drv.h"
> > > > >    #include "i915_vgpu.h"
> > > > >    #include "intel_gvt.h"
> > > > > +#include "gvt/gvt.h"
> > > > >    /**
> > > > >     * DOC: Intel GVT-g host support
> > > > > @@ -147,3 +148,17 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
> > > > >    	intel_gvt_clean_device(dev_priv);
> > > > >    }
> > > > > +
> > > > > +/**
> > > > > > + * intel_gvt_resume - GVT resume routine wrapper
> > > > > + *
> > > > > + * @dev_priv: drm i915 private *
> > > > > + *
> > > > > + * This function is called at the i915 driver resume stage to restore required
> > > > > > + * HW status for GVT so that vGPU can continue running after resume.
> > > > > + */
> > > > > +void intel_gvt_resume(struct drm_i915_private *dev_priv)
> > > > > +{
> > > > > +	if (intel_gvt_active(dev_priv))
> > > > > +		intel_gvt_pm_resume(dev_priv->gvt);
> > > > > +}
> > > > > diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
> > > > > index 502fad8a8652..d7d3fb6186fd 100644
> > > > > --- a/drivers/gpu/drm/i915/intel_gvt.h
> > > > > +++ b/drivers/gpu/drm/i915/intel_gvt.h
> > > > > @@ -33,6 +33,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv);
> > > > >    void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
> > > > >    int intel_gvt_init_host(void);
> > > > >    void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
> > > > > +void intel_gvt_resume(struct drm_i915_private *dev_priv);
> > > > >    #else
> > > > >    static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
> > > > >    {
> > > > > @@ -46,6 +47,10 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
> > > > >    static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
> > > > >    {
> > > > >    }
> > > > > +
> > > > > +static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
> > > > > +{
> > > > > +}
> > > > >    #endif
> > > > >    #endif /* _INTEL_GVT_H_ */
> > > > > -- 
> > > > > 2.29.0
> > > > > 
> > > -- 
> > > Best Regards,
> > > Colin Xu
> > > 
> -- 
> Best Regards,
> Colin Xu
>
Colin Xu Oct. 27, 2020, 2:16 a.m. UTC | #5
On 2020-10-27 09:49, Zhenyu Wang wrote:
> On 2020.10.27 08:42:40 +0800, Colin Xu wrote:
>> On 2020-10-26 17:19, Zhenyu Wang wrote:
>>> On 2020.10.23 16:17:19 +0800, Colin Xu wrote:
>>>> This patch save/restore necessary GVT info during i915 suspend/resume so
>>>> that GVT enabled QEMU VM can continue running.
>>>>
>>>> Only GGTT and fence regs are saved/restored now. GVT will save GGTT
>>>> entries on each host_entry update, restore the saved dirty entries
>>>> and re-init fence regs in resume routine.
>>>>
>>>> V2:
>>>> - Change kzalloc/kfree to vzalloc/vfree since the space allocated
>>>> from kmalloc may not enough for all saved GGTT entries.
>>>> - Keep gvt suspend/resume wrapper in intel_gvt.h/intel_gvt.c and
>>>> move the actual implementation to gvt.h/gvt.c. (zhenyu)
>>>> - Check gvt config on and active with intel_gvt_active(). (zhenyu)
>>>>
>>>> V3: (zhenyu)
>>>> - Incorrect copy length. Should be num entries * entry size.
>>>> - Use memcpy_toio()/memcpy_fromio() instead of memcpy for iomem.
>>>> - Add F_PM_SAVE flags to indicate which MMIOs to save/restore for PM.
>>>>
>>>> V4:
>>>> Rebase.
>>>>
>>>> V5:
>>>> Fail intel_gvt_save_ggtt as -ENOMEM if fail to alloc memory to save
>>>> ggtt. Free allocated ggtt_entries on failure.
>>>>
>>>> V6:
>>>> Save host entry to per-vGPU gtt.ggtt_mm on each host_entry update.
>>>>
>>>> Signed-off-by: Hang Yuan <hang.yuan@linux.intel.com>
>>>> Signed-off-by: Colin Xu <colin.xu@intel.com>
>>>> ---
>>>>    drivers/gpu/drm/i915/gvt/gtt.c      | 75 +++++++++++++++++++++++++++++
>>>>    drivers/gpu/drm/i915/gvt/gtt.h      |  9 ++++
>>>>    drivers/gpu/drm/i915/gvt/gvt.c      |  8 +++
>>>>    drivers/gpu/drm/i915/gvt/gvt.h      |  3 ++
>>>>    drivers/gpu/drm/i915/gvt/handlers.c | 39 +++++++++++++--
>>>>    drivers/gpu/drm/i915/gvt/mmio.h     |  3 ++
>>>>    drivers/gpu/drm/i915/intel_gvt.c    | 15 ++++++
>>>>    drivers/gpu/drm/i915/intel_gvt.h    |  5 ++
>>>>    8 files changed, 154 insertions(+), 3 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
>>>> index a3a4305eda01..33385d640cb9 100644
>>>> --- a/drivers/gpu/drm/i915/gvt/gtt.c
>>>> +++ b/drivers/gpu/drm/i915/gvt/gtt.c
>>>> @@ -636,9 +636,20 @@ static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
>>>>    		struct intel_gvt_gtt_entry *entry, unsigned long index)
>>>>    {
>>>>    	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
>>>> +	unsigned long offset = index;
>>>>    	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
>>>> +	if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
>>>> +		offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
>>>> +		mm->ggtt_mm.host_aperture[offset].val64 = entry->val64;
>>>> +		mm->ggtt_mm.host_aperture[offset].dirty = true;
>>>> +	} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
>>>> +		offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
>>>> +		mm->ggtt_mm.host_hidden[offset].val64 = entry->val64;
>>>> +		mm->ggtt_mm.host_hidden[offset].dirty = true;
>>>> +	}
>>> Looks like the 'dirty' flag is not needed at all, as what you need is to restore all
>>> present entries, so you can just check the present bit of val64 for that.
>> Strictly speaking they are different. The host could update an entry that
>> isn't marked present but whose other PTE fields are still valid; that is
>> what "present" describes. The purpose of "dirty" is to record whether the
>> host has updated the entry at all, regardless of page presence. If we only
>> check the present bit, there is a case where a previous PTE points to a
>> region that should not be walked via the new PDE, but the new PDE/PTE
>> update only marks the PDE present while leaving the PTE non-present. That
>> could leak information through the new PDE plus the old PTE. The hypervisor
>> should follow the guest's behavior and update all PDEs/PTEs, and the guest
>> should make sure a partial update won't leak information. If the hypervisor
>> doesn't follow the guest's page updates, information could be leaked.
> But this is only for GGTT, which is just a single-level table, right?
>
>> And actually, when a vGPU is created, all GGTT pages are updated, so in
>> practice all entries are dirty. So from a practical point of view, it's true
>> that the dirty check can be removed.
> You mean the guest would normally update all entries? That's OK, we just follow
> the guest's behavior. We've also seen guest partial updates of a GGTT entry,
> which could still be pending at the point of suspend/resume; we reset the
> present bit at that time, so those can be skipped in this case.
Yes, the guest would update all of them. So should I check the present bit
instead, or just save/restore all entries?
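
To make the alternative concrete, here is a minimal sketch of a present-bit
based restore, assuming a GEN8-style GGTT PTE where bit 0 is the valid/present
bit; gvt_pte_present() and restore_host_range() are hypothetical helpers, not
part of this patch:

/* Sketch only: restore entries whose PTE present bit is set. */
static bool gvt_pte_present(u64 pte)
{
	return pte & BIT_ULL(0); /* assumed GEN8 GGTT PTE valid bit */
}

static void restore_host_range(struct intel_vgpu *vgpu,
			       struct intel_vgpu_host_ggtt *entries,
			       u32 base, u32 num)
{
	u32 idx;

	for (idx = 0; idx < num; idx++)
		if (gvt_pte_present(entries[idx].val64))
			write_pte64(vgpu->gvt->gt->ggtt,
				    base + idx, entries[idx].val64);
}

With a check like this, the dirty flag (and the struct wrapper around val64)
could be dropped, at the cost of skipping any entry the guest left non-present.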
>
>>>> +
>>>>    	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
>>>>    }
>>>> @@ -1928,6 +1939,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
>>>>    {
>>>>    	struct intel_vgpu_mm *mm;
>>>>    	unsigned long nr_entries;
>>>> +	u64 size;
>>>>    	mm = vgpu_alloc_mm(vgpu);
>>>>    	if (!mm)
>>>> @@ -1944,6 +1956,25 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
>>>>    		return ERR_PTR(-ENOMEM);
>>>>    	}
>>>> +	size = (vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) *
>>>> +		sizeof(struct intel_vgpu_host_ggtt);
>>>> +	mm->ggtt_mm.host_aperture = vzalloc(size);
>>> I think normally you'd just write the required size calculation in the alloc
>>> parameter instead of in an extra variable.
>> That was only to keep the line length within 80 columns; otherwise the
>> alignment to the parentheses doesn't look clean. Since the kernel had 80
>> columns as the 'strongly preferred limit', if it's OK I'll put it on the
>> same line so that no extra variable is required.
> No, that strong word is deprecated. ;)
It makes things easier.
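
For reference, the allocation could be written without the extra size variable
along these lines; this is only a sketch, and using the kernel's array_size()
helper from <linux/overflow.h> here is an assumption, not something mandated by
the review:

	/* Sketch: size computed inline, overflow-safe via array_size() */
	mm->ggtt_mm.host_aperture =
		vzalloc(array_size(vgpu_aperture_sz(vgpu) >> PAGE_SHIFT,
				   sizeof(struct intel_vgpu_host_ggtt)));
	if (!mm->ggtt_mm.host_aperture)
		goto err_free_virtual_ggtt; /* hypothetical unwind label */

The same pattern would apply to host_hidden with vgpu_hidden_sz().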
>
>>>> +	if (!mm->ggtt_mm.host_aperture) {
>>>> +		vfree(mm->ggtt_mm.virtual_ggtt);
>>>> +		vgpu_free_mm(mm);
>>>> +		return ERR_PTR(-ENOMEM);
>>>> +	}
>>>> +
>>>> +	size = (vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) *
>>>> +		sizeof(struct intel_vgpu_host_ggtt);
>>>> +	mm->ggtt_mm.host_hidden = vzalloc(size);
>>>> +	if (!mm->ggtt_mm.host_hidden) {
>>>> +		vfree(mm->ggtt_mm.host_aperture);
>>>> +		vfree(mm->ggtt_mm.virtual_ggtt);
>>>> +		vgpu_free_mm(mm);
>>>> +		return ERR_PTR(-ENOMEM);
>>>> +	}
>>>> +
>>>>    	return mm;
>>>>    }
>>>> @@ -1971,6 +2002,8 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
>>>>    		invalidate_ppgtt_mm(mm);
>>>>    	} else {
>>>>    		vfree(mm->ggtt_mm.virtual_ggtt);
>>>> +		vfree(mm->ggtt_mm.host_aperture);
>>>> +		vfree(mm->ggtt_mm.host_hidden);
>>>>    	}
>>>>    	vgpu_free_mm(mm);
>>>> @@ -2852,3 +2885,45 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
>>>>    	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
>>>>    	intel_vgpu_reset_ggtt(vgpu, true);
>>>>    }
>>>> +
>>>> +/**
>>>> + * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
>>>> + * @gvt: intel gvt device
>>>> + *
>>>> + * This function is called at driver resume stage to restore
>>>> + * GGTT entries of every vGPU.
>>>> + *
>>>> + */
>>>> +void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
>>>> +{
>>>> +	struct intel_vgpu *vgpu;
>>>> +	struct intel_vgpu_mm *mm;
>>>> +	struct intel_vgpu_host_ggtt *host_ggtt;
>>>> +	int id;
>>>> +	u32 idx, num_low, num_hi, offset;
>>>> +
>>>> +	/* Restore dirty host ggtt for all vGPUs */
>>>> +	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
>>>> +		mm = vgpu->gtt.ggtt_mm;
>>>> +
>>>> +		num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
>>>> +		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
>>>> +		for (idx = 0; idx < num_low; idx++) {
>>>> +			host_ggtt = &mm->ggtt_mm.host_aperture[idx];
>>>> +			if (host_ggtt->dirty) {
>>>> +				write_pte64(vgpu->gvt->gt->ggtt,
>>>> +					    offset + idx, host_ggtt->val64);
>>>> +			}
>>>> +		}
>>>> +
>>>> +		num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
>>>> +		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
>>>> +		for (idx = 0; idx < num_hi; idx++) {
>>>> +			host_ggtt = &mm->ggtt_mm.host_hidden[idx];
>>>> +			if (host_ggtt->dirty) {
>>>> +				write_pte64(vgpu->gvt->gt->ggtt,
>>>> +					    offset + idx, host_ggtt->val64);
>>>> +			}
>>>> +		}
>>>> +	}
>>>> +}
>>>> diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
>>>> index 52d0d88abd86..7ec333a2ead5 100644
>>>> --- a/drivers/gpu/drm/i915/gvt/gtt.h
>>>> +++ b/drivers/gpu/drm/i915/gvt/gtt.h
>>>> @@ -139,6 +139,11 @@ struct intel_gvt_partial_pte {
>>>>    	struct list_head list;
>>>>    };
>>>> +struct intel_vgpu_host_ggtt {
>>>> +	u64 val64;
>>>> +	bool dirty;
>>>> +};
>>>> +
>>>>    struct intel_vgpu_mm {
>>>>    	enum intel_gvt_mm_type type;
>>>>    	struct intel_vgpu *vgpu;
>>>> @@ -164,6 +169,9 @@ struct intel_vgpu_mm {
>>>>    		} ppgtt_mm;
>>>>    		struct {
>>>>    			void *virtual_ggtt;
>>>> +			/* Save/restore for PM */
>>>> +			struct intel_vgpu_host_ggtt *host_aperture;
>>>> +			struct intel_vgpu_host_ggtt *host_hidden;
>>>>    			struct list_head partial_pte_list;
>>>>    		} ggtt_mm;
>>>>    	};
>>>> @@ -280,5 +288,6 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
>>>>    	unsigned int off, void *p_data, unsigned int bytes);
>>>>    void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
>>>> +void intel_gvt_restore_ggtt(struct intel_gvt *gvt);
>>>>    #endif /* _GVT_GTT_H_ */
>>>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
>>>> index c7c561237883..d0e9c43b031b 100644
>>>> --- a/drivers/gpu/drm/i915/gvt/gvt.c
>>>> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
>>>> @@ -405,6 +405,14 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
>>>>    	return ret;
>>>>    }
>>>> +int
>>>> +intel_gvt_pm_resume(struct intel_gvt *gvt)
>>>> +{
>>>> +	intel_gvt_restore_regs(gvt);
>>>> +	intel_gvt_restore_ggtt(gvt);
>>>> +	return 0;
>>> Please split out the restore_regs part, which seems to be separate.
>> Split intel_gvt_restore_regs() further into fence regs and the normal tracked
>> MMIOs with F_PM_SAVE?
> Yep, those two for the regs restore handling.
>
> Thanks
>
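
To make the agreed split concrete, a sketch of how intel_gvt_pm_resume() could
call two separate helpers; the names intel_gvt_restore_fence() and
intel_gvt_restore_mmio() are illustrative guesses, not taken from a posted
revision, and the bodies simply rearrange code already in this patch:

/* Sketch only: fence restore and F_PM_SAVE MMIO restore split apart. */
void intel_gvt_restore_fence(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int i, id;

	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
		mmio_hw_access_pre(gvt->gt);
		for (i = 0; i < vgpu_fence_sz(vgpu); i++)
			intel_vgpu_write_fence(vgpu, i,
				vgpu_vreg64(vgpu, fence_num_to_offset(i)));
		mmio_hw_access_post(gvt->gt);
	}
}

void intel_gvt_restore_mmio(struct intel_gvt *gvt)
{
	struct intel_vgpu *vgpu;
	int id;

	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
		mmio_hw_access_pre(gvt->gt);
		intel_gvt_for_each_tracked_mmio(gvt,
						mmio_pm_restore_handler, vgpu);
		mmio_hw_access_post(gvt->gt);
	}
}

int intel_gvt_pm_resume(struct intel_gvt *gvt)
{
	intel_gvt_restore_fence(gvt);
	intel_gvt_restore_mmio(gvt);
	intel_gvt_restore_ggtt(gvt);
	return 0;
}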
>>>> +}
>>>> +
>>>>    int
>>>>    intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
>>>>    {
>>>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
>>>> index a81cf0f01e78..b3d6355dd797 100644
>>>> --- a/drivers/gpu/drm/i915/gvt/gvt.h
>>>> +++ b/drivers/gpu/drm/i915/gvt/gvt.h
>>>> @@ -255,6 +255,8 @@ struct intel_gvt_mmio {
>>>>    #define F_CMD_ACCESS	(1 << 3)
>>>>    /* This reg has been accessed by a VM */
>>>>    #define F_ACCESSED	(1 << 4)
>>>> +/* This reg requires save & restore during host PM suspend/resume */
>>>> +#define F_PM_SAVE	(1 << 5)
>>>>    /* This reg could be accessed by unaligned address */
>>>>    #define F_UNALIGN	(1 << 6)
>>>>    /* This reg is in GVT's mmio save-restor list and in hardware
>>>> @@ -685,6 +687,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
>>>>    void intel_gvt_debugfs_init(struct intel_gvt *gvt);
>>>>    void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
>>>> +int intel_gvt_pm_resume(struct intel_gvt *gvt);
>>>>    #include "trace.h"
>>>>    #include "mpt.h"
>>>> diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
>>>> index 89353b97589a..1f2432d7df34 100644
>>>> --- a/drivers/gpu/drm/i915/gvt/handlers.c
>>>> +++ b/drivers/gpu/drm/i915/gvt/handlers.c
>>>> @@ -3120,9 +3120,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
>>>>    	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
>>>>    	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
>>>>    	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
>>>> -	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
>>>> -		NULL, gen9_trtte_write);
>>>> -	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
>>>> +	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
>>>> +		 NULL, gen9_trtte_write);
>>>> +	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
>>>> +		 NULL, gen9_trtt_chicken_write);
>>>>    	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
>>>> @@ -3661,3 +3662,35 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
>>>>    		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
>>>>    		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
>>>>    }
>>>> +
>>>> +static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
>>>> +					  u32 offset, void *data)
>>>> +{
>>>> +	struct intel_vgpu *vgpu = data;
>>>> +	struct drm_i915_private *dev_priv = gvt->gt->i915;
>>>> +
>>>> +	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
>>>> +		I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
>>>> +
>>>> +	return 0;
>>>> +}
>>>> +
>>>> +void intel_gvt_restore_regs(struct intel_gvt *gvt)
>>>> +{
>>>> +	struct intel_vgpu *vgpu;
>>>> +	int i, id;
>>>> +	u64 val;
>>>> +
>>>> +	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
>>>> +		mmio_hw_access_pre(gvt->gt);
>>>> +		for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
>>>> +			val = vgpu_vreg64(vgpu, fence_num_to_offset(i));
>>>> +			intel_vgpu_write_fence(vgpu, i, val);
>>>> +		}
>>>> +
>>>> +		intel_gvt_for_each_tracked_mmio(gvt,
>>>> +						mmio_pm_restore_handler, vgpu);
>>>> +
>>>> +		mmio_hw_access_post(gvt->gt);
>>>> +	}
>>>> +}
>>>> diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
>>>> index cc4812648bf4..999d9dda0614 100644
>>>> --- a/drivers/gpu/drm/i915/gvt/mmio.h
>>>> +++ b/drivers/gpu/drm/i915/gvt/mmio.h
>>>> @@ -104,4 +104,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
>>>>    int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
>>>>    				  void *p_data, unsigned int bytes);
>>>> +
>>>> +void intel_gvt_restore_regs(struct intel_gvt *gvt);
>>>> +
>>>>    #endif
>>>> diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
>>>> index 99fe8aef1c67..4e70c1a9ef2e 100644
>>>> --- a/drivers/gpu/drm/i915/intel_gvt.c
>>>> +++ b/drivers/gpu/drm/i915/intel_gvt.c
>>>> @@ -24,6 +24,7 @@
>>>>    #include "i915_drv.h"
>>>>    #include "i915_vgpu.h"
>>>>    #include "intel_gvt.h"
>>>> +#include "gvt/gvt.h"
>>>>    /**
>>>>     * DOC: Intel GVT-g host support
>>>> @@ -147,3 +148,17 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
>>>>    	intel_gvt_clean_device(dev_priv);
>>>>    }
>>>> +
>>>> +/**
>>>> + * intel_gvt_resume - GVT resume routine wrapper
>>>> + *
>>>> + * @dev_priv: drm i915 private *
>>>> + *
>>>> + * This function is called at the i915 driver resume stage to restore required
>>>> + * HW status for GVT so that vGPU can continue running after resume.
>>>> + */
>>>> +void intel_gvt_resume(struct drm_i915_private *dev_priv)
>>>> +{
>>>> +	if (intel_gvt_active(dev_priv))
>>>> +		intel_gvt_pm_resume(dev_priv->gvt);
>>>> +}
>>>> diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
>>>> index 502fad8a8652..d7d3fb6186fd 100644
>>>> --- a/drivers/gpu/drm/i915/intel_gvt.h
>>>> +++ b/drivers/gpu/drm/i915/intel_gvt.h
>>>> @@ -33,6 +33,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv);
>>>>    void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
>>>>    int intel_gvt_init_host(void);
>>>>    void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
>>>> +void intel_gvt_resume(struct drm_i915_private *dev_priv);
>>>>    #else
>>>>    static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
>>>>    {
>>>> @@ -46,6 +47,10 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
>>>>    static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
>>>>    {
>>>>    }
>>>> +
>>>> +static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
>>>> +{
>>>> +}
>>>>    #endif
>>>>    #endif /* _INTEL_GVT_H_ */
>>>> -- 
>>>> 2.29.0
>>>>
>> -- 
>> Best Regards,
>> Colin Xu
>>

Patch

diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index a3a4305eda01..33385d640cb9 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -636,9 +636,20 @@  static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
 		struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
 	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+	unsigned long offset = index;
 
 	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
 
+	if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
+		offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
+		mm->ggtt_mm.host_aperture[offset].val64 = entry->val64;
+		mm->ggtt_mm.host_aperture[offset].dirty = true;
+	} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
+		offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
+		mm->ggtt_mm.host_hidden[offset].val64 = entry->val64;
+		mm->ggtt_mm.host_hidden[offset].dirty = true;
+	}
+
 	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
 }
 
@@ -1928,6 +1939,7 @@  static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_mm *mm;
 	unsigned long nr_entries;
+	u64 size;
 
 	mm = vgpu_alloc_mm(vgpu);
 	if (!mm)
@@ -1944,6 +1956,25 @@  static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
 		return ERR_PTR(-ENOMEM);
 	}
 
+	size = (vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) *
+		sizeof(struct intel_vgpu_host_ggtt);
+	mm->ggtt_mm.host_aperture = vzalloc(size);
+	if (!mm->ggtt_mm.host_aperture) {
+		vfree(mm->ggtt_mm.virtual_ggtt);
+		vgpu_free_mm(mm);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	size = (vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) *
+		sizeof(struct intel_vgpu_host_ggtt);
+	mm->ggtt_mm.host_hidden = vzalloc(size);
+	if (!mm->ggtt_mm.host_hidden) {
+		vfree(mm->ggtt_mm.host_aperture);
+		vfree(mm->ggtt_mm.virtual_ggtt);
+		vgpu_free_mm(mm);
+		return ERR_PTR(-ENOMEM);
+	}
+
 	return mm;
 }
 
@@ -1971,6 +2002,8 @@  void _intel_vgpu_mm_release(struct kref *mm_ref)
 		invalidate_ppgtt_mm(mm);
 	} else {
 		vfree(mm->ggtt_mm.virtual_ggtt);
+		vfree(mm->ggtt_mm.host_aperture);
+		vfree(mm->ggtt_mm.host_hidden);
 	}
 
 	vgpu_free_mm(mm);
@@ -2852,3 +2885,45 @@  void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
 	intel_vgpu_reset_ggtt(vgpu, true);
 }
+
+/**
+ * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
+ * @gvt: intel gvt device
+ *
+ * This function is called at driver resume stage to restore
+ * GGTT entries of every vGPU.
+ *
+ */
+void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
+{
+	struct intel_vgpu *vgpu;
+	struct intel_vgpu_mm *mm;
+	struct intel_vgpu_host_ggtt *host_ggtt;
+	int id;
+	u32 idx, num_low, num_hi, offset;
+
+	/* Restore dirty host ggtt for all vGPUs */
+	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
+		mm = vgpu->gtt.ggtt_mm;
+
+		num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+		offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+		for (idx = 0; idx < num_low; idx++) {
+			host_ggtt = &mm->ggtt_mm.host_aperture[idx];
+			if (host_ggtt->dirty) {
+				write_pte64(vgpu->gvt->gt->ggtt,
+					    offset + idx, host_ggtt->val64);
+			}
+		}
+
+		num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+		offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+		for (idx = 0; idx < num_hi; idx++) {
+			host_ggtt = &mm->ggtt_mm.host_hidden[idx];
+			if (host_ggtt->dirty) {
+				write_pte64(vgpu->gvt->gt->ggtt,
+					    offset + idx, host_ggtt->val64);
+			}
+		}
+	}
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 52d0d88abd86..7ec333a2ead5 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -139,6 +139,11 @@  struct intel_gvt_partial_pte {
 	struct list_head list;
 };
 
+struct intel_vgpu_host_ggtt {
+	u64 val64;
+	bool dirty;
+};
+
 struct intel_vgpu_mm {
 	enum intel_gvt_mm_type type;
 	struct intel_vgpu *vgpu;
@@ -164,6 +169,9 @@  struct intel_vgpu_mm {
 		} ppgtt_mm;
 		struct {
 			void *virtual_ggtt;
+			/* Save/restore for PM */
+			struct intel_vgpu_host_ggtt *host_aperture;
+			struct intel_vgpu_host_ggtt *host_hidden;
 			struct list_head partial_pte_list;
 		} ggtt_mm;
 	};
@@ -280,5 +288,6 @@  int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes);
 
 void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
+void intel_gvt_restore_ggtt(struct intel_gvt *gvt);
 
 #endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index c7c561237883..d0e9c43b031b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -405,6 +405,14 @@  int intel_gvt_init_device(struct drm_i915_private *i915)
 	return ret;
 }
 
+int
+intel_gvt_pm_resume(struct intel_gvt *gvt)
+{
+	intel_gvt_restore_regs(gvt);
+	intel_gvt_restore_ggtt(gvt);
+	return 0;
+}
+
 int
 intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
 {
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index a81cf0f01e78..b3d6355dd797 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -255,6 +255,8 @@  struct intel_gvt_mmio {
 #define F_CMD_ACCESS	(1 << 3)
 /* This reg has been accessed by a VM */
 #define F_ACCESSED	(1 << 4)
+/* This reg requires save & restore during host PM suspend/resume */
+#define F_PM_SAVE	(1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
 /* This reg is in GVT's mmio save-restor list and in hardware
@@ -685,6 +687,7 @@  void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_init(struct intel_gvt *gvt);
 void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
 
+int intel_gvt_pm_resume(struct intel_gvt *gvt);
 
 #include "trace.h"
 #include "mpt.h"
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 89353b97589a..1f2432d7df34 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3120,9 +3120,10 @@  static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
-	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
-		NULL, gen9_trtte_write);
-	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
+	MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
+		 NULL, gen9_trtte_write);
+	MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
+		 NULL, gen9_trtt_chicken_write);
 
 	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
@@ -3661,3 +3662,35 @@  int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 		intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
 		intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
 }
+
+static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
+					  u32 offset, void *data)
+{
+	struct intel_vgpu *vgpu = data;
+	struct drm_i915_private *dev_priv = gvt->gt->i915;
+
+	if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
+		I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+
+	return 0;
+}
+
+void intel_gvt_restore_regs(struct intel_gvt *gvt)
+{
+	struct intel_vgpu *vgpu;
+	int i, id;
+	u64 val;
+
+	idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
+		mmio_hw_access_pre(gvt->gt);
+		for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
+			val = vgpu_vreg64(vgpu, fence_num_to_offset(i));
+			intel_vgpu_write_fence(vgpu, i, val);
+		}
+
+		intel_gvt_for_each_tracked_mmio(gvt,
+						mmio_pm_restore_handler, vgpu);
+
+		mmio_hw_access_post(gvt->gt);
+	}
+}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index cc4812648bf4..999d9dda0614 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -104,4 +104,7 @@  int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 
 int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 				  void *p_data, unsigned int bytes);
+
+void intel_gvt_restore_regs(struct intel_gvt *gvt);
+
 #endif
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 99fe8aef1c67..4e70c1a9ef2e 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -24,6 +24,7 @@ 
 #include "i915_drv.h"
 #include "i915_vgpu.h"
 #include "intel_gvt.h"
+#include "gvt/gvt.h"
 
 /**
  * DOC: Intel GVT-g host support
@@ -147,3 +148,17 @@  void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
 
 	intel_gvt_clean_device(dev_priv);
 }
+
+/**
+ * intel_gvt_resume - GVT resume routine wrapper
+ *
+ * @dev_priv: drm i915 private *
+ *
+ * This function is called at the i915 driver resume stage to restore required
+ * HW status for GVT so that vGPU can continue running after resume.
+ */
+void intel_gvt_resume(struct drm_i915_private *dev_priv)
+{
+	if (intel_gvt_active(dev_priv))
+		intel_gvt_pm_resume(dev_priv->gvt);
+}
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
index 502fad8a8652..d7d3fb6186fd 100644
--- a/drivers/gpu/drm/i915/intel_gvt.h
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -33,6 +33,7 @@  int intel_gvt_init_device(struct drm_i915_private *dev_priv);
 void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
 int intel_gvt_init_host(void);
 void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
+void intel_gvt_resume(struct drm_i915_private *dev_priv);
 #else
 static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
 {
@@ -46,6 +47,10 @@  static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
 static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
 {
 }
+
+static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
+{
+}
 #endif
 
 #endif /* _INTEL_GVT_H_ */