diff mbox

[RFCv2,03/14] drm/i915: Introduce host graphics memory/fence partition for GVT-g

Message ID 1455795741-3487-4-git-send-email-zhi.a.wang@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Wang, Zhi A Feb. 18, 2016, 11:42 a.m. UTC
From: Bing Niu <bing.niu@intel.com>

This patch introduces host graphics memory/fence partition when GVT-g
is enabled.

Under GVT-g, the i915 host driver owns only limited graphics resources;
the others are managed by the GVT-g resource allocator and kept for other vGPUs.

v2:
- Address all coding-style comments from Joonas previously.
- Fix errors and warnings reported by checkpatch.pl. (Joonas)
- Move the graphs into the header files. (Daniel)

Signed-off-by: Bing Niu <bing.niu@intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
 drivers/gpu/drm/i915/gvt/gvt.c      |  4 ++++
 drivers/gpu/drm/i915/gvt/params.c   | 12 ++++++++++++
 drivers/gpu/drm/i915/gvt/params.h   |  3 +++
 drivers/gpu/drm/i915/i915_drv.h     | 35 +++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem.c     |  4 +++-
 drivers/gpu/drm/i915/i915_gem_gtt.c |  4 ++--
 drivers/gpu/drm/i915/i915_vgpu.c    | 21 +++++++++++++++++----
 7 files changed, 76 insertions(+), 7 deletions(-)

Comments

Joonas Lahtinen Feb. 23, 2016, 1:16 p.m. UTC | #1
On to, 2016-02-18 at 19:42 +0800, Zhi Wang wrote:
> From: Bing Niu <bing.niu@intel.com>
> 
> This patch introduces host graphics memory/fence partition when GVT-g
> is enabled.
> 
> Under GVT-g, i915 host driver only owned limited graphics resources,
> others are managed by GVT-g resource allocator and kept for other vGPUs.
> 
> v2:
> - Address all coding-style comments from Joonas previously.
> - Fix errors and warnning reported by checkpatch.pl. (Joonas)
> - Move the graphs into the header files. (Daniel)
> 
> Signed-off-by: Bing Niu <bing.niu@intel.com>
> Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
> ---
>  drivers/gpu/drm/i915/gvt/gvt.c      |  4 ++++
>  drivers/gpu/drm/i915/gvt/params.c   | 12 ++++++++++++
>  drivers/gpu/drm/i915/gvt/params.h   |  3 +++
>  drivers/gpu/drm/i915/i915_drv.h     | 35 +++++++++++++++++++++++++++++++++++
>  drivers/gpu/drm/i915/i915_gem.c     |  4 +++-
>  drivers/gpu/drm/i915/i915_gem_gtt.c |  4 ++--
>  drivers/gpu/drm/i915/i915_vgpu.c    | 21 +++++++++++++++++----
>  7 files changed, 76 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> index 52cfa32..2099b7e 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.c
> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> @@ -348,6 +348,10 @@ void *gvt_create_pgt_device(struct drm_i915_private *dev_priv)
>  		goto err;
>  	}
>  
> +	dev_priv->gvt.host_fence_sz = gvt.host_fence_sz;
> +	dev_priv->gvt.host_low_gm_sz_in_mb = gvt.host_low_gm_sz;
> +	dev_priv->gvt.host_high_gm_sz_in_mb = gvt.host_high_gm_sz;

I'm thinking, could we expose the pgt_device struct (at least
partially, and then have a PIMPL pattern), to avoid this kind of
duplication of declarations and unnecessary copies between i915 and
i915_gvt modules?

It's a little rough that the gvt driver writes to the i915_private struct.
I'd rather see gvt.host_fence_sz and the other variables get sanitized
and then written to pgt_device (maybe the public part would be
i915_pgt_device) and used by both the gvt and i915 code.

Was this ever considered?

> +
>  	gvt_dbg_core("pgt device creation done, id %d", pdev->id);
>  
>  	return pdev;
> diff --git a/drivers/gpu/drm/i915/gvt/params.c b/drivers/gpu/drm/i915/gvt/params.c
> index d381d17..75195fd 100644
> --- a/drivers/gpu/drm/i915/gvt/params.c
> +++ b/drivers/gpu/drm/i915/gvt/params.c
> @@ -26,7 +26,19 @@
>  struct gvt_kernel_params gvt = {
>  	.enable = false,
>  	.debug = 0,
> +	.host_low_gm_sz = 96,	/* in MB */
> +	.host_high_gm_sz = 384, /* in MB */
> +	.host_fence_sz = 4,
>  };
>  
>  module_param_named(gvt_enable, gvt.enable, bool, 0600);
>  MODULE_PARM_DESC(gvt_enable, "Enable Intel GVT-g host support");
> +
> +module_param_named(gvt_host_low_gm_sz, gvt.host_low_gm_sz, int, 0600);
> +MODULE_PARM_DESC(gvt_host_low_gm_sz, "Amount of aperture size of host (in MB)");

As i915_gvt will be the host module, "gvt_host" can be removed so the
module parameters become;

i915_gvt.low_gm_sz instead of i915_gvt.gvt_host_low_gm_sz

Also should these be _unsafe parameters because I bet they can make the
machine go crazy if wrong?

> +
> +module_param_named(gvt_host_high_gm_sz, gvt.host_high_gm_sz, int, 0600);
> +MODULE_PARM_DESC(gvt_host_high_gm_sz, "Amount of high memory size of host (in MB)");
> +
> +module_param_named(gvt_host_fence_sz, gvt.host_fence_sz, int, 0600);
> +MODULE_PARM_DESC(gvt_host_fence_sz, "Amount of fence size of host (in MB)");
> diff --git a/drivers/gpu/drm/i915/gvt/params.h b/drivers/gpu/drm/i915/gvt/params.h
> index d2955b9..f4e9356 100644
> --- a/drivers/gpu/drm/i915/gvt/params.h
> +++ b/drivers/gpu/drm/i915/gvt/params.h
> @@ -27,6 +27,9 @@
>  struct gvt_kernel_params {
>  	bool enable;
>  	int debug;
> +	int host_low_gm_sz;
> +	int host_high_gm_sz;
> +	int host_fence_sz;
>  };
>  
>  extern struct gvt_kernel_params gvt;
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 2f897c3..1fd5575 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1705,8 +1705,43 @@ struct i915_workarounds {
>  	u32 hw_whitelist_count[I915_NUM_RINGS];
>  };
>  
> +/*
> + * Under GVT-g, i915 host driver only owned limited graphics resources,
> + * others are managed by GVT-g resource allocator and kept for other vGPUs.
> + *
> + * For graphics memory space partition, a typical layout looks like:
> + *
> + * +-------+-----------------------+------+-----------------------+
> + * |* Host |   *GVT-g Resource     |* Host|   *GVT-g Resource     |
> + * | Owned |   Allocator Managed   | Owned|   Allocator Managed   |
> + * |       |                       |      |                       |
> + * +---------------+-------+----------------------+-------+-------+
> + * |       |       |       |       |      |       |       |       |
> + * | i915  | vm 1  | vm 2  | vm 3  | i915 | vm 1  | vm 2  | vm 3  |
> + * |       |       |       |       |      |       |       |       |
> + * +-------+-------+-------+--------------+-------+-------+-------+
> + * |           Aperture            |            Hidden            |
> + * +-------------------------------+------------------------------+
> + * |                       GGTT memory space                      |
> + * +--------------------------------------------------------------+
> + *
> + * Similar with fence registers partition:
> + *
> + * +------ +-----------------------+
> + * | * Host|    GVT-g Resource     |
> + * | Owned |   Allocator Managed   +
> + * 0       |                       31
> + * +---------------+-------+-------+
> + * |       |       |       |       |
> + * | i915  | vm 1  | vm 2  | vm 3  |
> + * |       |       |       |       |
> + * +-------+-------+-------+-------+
> + */
>  struct i915_gvt {
>  	void *pgt_device;
> +	u32 host_low_gm_sz_in_mb;
> +	u32 host_high_gm_sz_in_mb;
> +	int host_fence_sz;
>  };
>  
>  struct i915_virtual_gpu {
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index f68f346..1c0006a 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -5078,7 +5078,9 @@ i915_gem_load_init(struct drm_device *dev)
>  	else
>  		dev_priv->num_fence_regs = 8;
>  
> -	if (intel_vgpu_active(dev))
> +	if (intel_gvt_active(dev))
> +		dev_priv->num_fence_regs = dev_priv->gvt.host_fence_sz;
> +	else if (intel_vgpu_active(dev))
>  		dev_priv->num_fence_regs =
>  				I915_READ(vgtif_reg(avail_rs.fence_num));
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 9127f8f..de09dd4 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2734,7 +2734,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
>  	i915_address_space_init(ggtt_vm, dev_priv);
>  	ggtt_vm->total += PAGE_SIZE;
>  
> -	if (intel_vgpu_active(dev)) {
> +	if (intel_vgpu_active(dev) || intel_gvt_active(dev)) {
>  		ret = intel_vgt_balloon(dev);
>  		if (ret)
>  			return ret;
> @@ -2833,7 +2833,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
>  	i915_gem_cleanup_stolen(dev);
>  
>  	if (drm_mm_initialized(&vm->mm)) {
> -		if (intel_vgpu_active(dev))
> +		if (intel_vgpu_active(dev) || intel_gvt_active(dev))
>  			intel_vgt_deballoon();
>  
>  		drm_mm_takedown(&vm->mm);
> diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
> index dea7429..7be1435 100644
> --- a/drivers/gpu/drm/i915/i915_vgpu.c
> +++ b/drivers/gpu/drm/i915/i915_vgpu.c
> @@ -188,10 +188,23 @@ int intel_vgt_balloon(struct drm_device *dev)
>  	unsigned long unmappable_base, unmappable_size, unmappable_end;
>  	int ret;
>  
> -	mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
> -	mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
> -	unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
> -	unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
> +	if (intel_gvt_active(dev)) {
> +		mappable_base = 0;
> +		mappable_size = dev_priv->gvt.host_low_gm_sz_in_mb << 20;
> +		unmappable_base = dev_priv->gtt.mappable_end;
> +		unmappable_size = dev_priv->gvt.host_high_gm_sz_in_mb << 20;
> +	} else if (intel_vgpu_active(dev)) {
> +		mappable_base = I915_READ(
> +			vgtif_reg(avail_rs.mappable_gmadr.base));
> +		mappable_size = I915_READ(
> +			vgtif_reg(avail_rs.mappable_gmadr.size));
> +		unmappable_base = I915_READ(
> +			vgtif_reg(avail_rs.nonmappable_gmadr.base));
> +		unmappable_size = I915_READ(
> +			vgtif_reg(avail_rs.nonmappable_gmadr.size));
> +	} else {
> +		return -ENODEV;
> +	}
>  
>  	mappable_end = mappable_base + mappable_size;
>  	unmappable_end = unmappable_base + unmappable_size;
Wang, Zhi A Feb. 23, 2016, 1:23 p.m. UTC | #2
On 02/23/16 21:16, Joonas Lahtinen wrote:
> On to, 2016-02-18 at 19:42 +0800, Zhi Wang wrote:
>> From: Bing Niu <bing.niu@intel.com>
>>
>> This patch introduces host graphics memory/fence partition when GVT-g
>> is enabled.
>>
>> Under GVT-g, i915 host driver only owned limited graphics resources,
>> others are managed by GVT-g resource allocator and kept for other vGPUs.
>>
>> v2:
>> - Address all coding-style comments from Joonas previously.
>> - Fix errors and warnning reported by checkpatch.pl. (Joonas)
>> - Move the graphs into the header files. (Daniel)
>>
>> Signed-off-by: Bing Niu <bing.niu@intel.com>
>> Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
>> ---
>>   drivers/gpu/drm/i915/gvt/gvt.c      |  4 ++++
>>   drivers/gpu/drm/i915/gvt/params.c   | 12 ++++++++++++
>>   drivers/gpu/drm/i915/gvt/params.h   |  3 +++
>>   drivers/gpu/drm/i915/i915_drv.h     | 35 +++++++++++++++++++++++++++++++++++
>>   drivers/gpu/drm/i915/i915_gem.c     |  4 +++-
>>   drivers/gpu/drm/i915/i915_gem_gtt.c |  4 ++--
>>   drivers/gpu/drm/i915/i915_vgpu.c    | 21 +++++++++++++++++----
>>   7 files changed, 76 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
>> index 52cfa32..2099b7e 100644
>> --- a/drivers/gpu/drm/i915/gvt/gvt.c
>> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
>> @@ -348,6 +348,10 @@ void *gvt_create_pgt_device(struct drm_i915_private *dev_priv)
>>   		goto err;
>>   	}
>>
>> +	dev_priv->gvt.host_fence_sz = gvt.host_fence_sz;
>> +	dev_priv->gvt.host_low_gm_sz_in_mb = gvt.host_low_gm_sz;
>> +	dev_priv->gvt.host_high_gm_sz_in_mb = gvt.host_high_gm_sz;
>
> I'm thinking, could we expose the pgt_device struct (at least
> partially, and then have a PIMPL pattern), to avoid this kind of
> duplication of declarations and unnecessary copies between i915 and
> i915_gvt modules?
>
> It's little rough that the gvt driver writes to i915_private struct.
> I'd rather see that gvt.host_fence_sz and other variables get sanitized
> and then written to pgt_device (maybe the public part would be
> i915_pgt_device) and used by gvt and i915 code.
>
> Was this ever considered?
>
The previous version does something similar to that: both i915 and gvt 
read the GVT module kernel parameters. But considering that the GVT module 
could be configured as "n" in the kernel configuration, perhaps we should let 
i915 store this information and let GVT configure it if GVT is active?
>> +
>>   	gvt_dbg_core("pgt device creation done, id %d", pdev->id);
>>
>>   	return pdev;
>> diff --git a/drivers/gpu/drm/i915/gvt/params.c b/drivers/gpu/drm/i915/gvt/params.c
>> index d381d17..75195fd 100644
>> --- a/drivers/gpu/drm/i915/gvt/params.c
>> +++ b/drivers/gpu/drm/i915/gvt/params.c
>> @@ -26,7 +26,19 @@
>>   struct gvt_kernel_params gvt = {
>>   	.enable = false,
>>   	.debug = 0,
>> +	.host_low_gm_sz = 96,	/* in MB */
>> +	.host_high_gm_sz = 384, /* in MB */
>> +	.host_fence_sz = 4,
>>   };
>>
>>   module_param_named(gvt_enable, gvt.enable, bool, 0600);
>>   MODULE_PARM_DESC(gvt_enable, "Enable Intel GVT-g host support");
>> +
>> +module_param_named(gvt_host_low_gm_sz, gvt.host_low_gm_sz, int, 0600);
>> +MODULE_PARM_DESC(gvt_host_low_gm_sz, "Amount of aperture size of host (in MB)");
>
> As i915_gvt will be the host module, "gvt_host" can be removed so the
> module parameters become;
>
> i915_gvt.low_gm_sz instead of i915_gvt.gvt_host_low_gm_sz
>
> Also should these be _unsafe parameters because I bet they can make the
> machine go crazy if wrong?
>
Yes, the params need to be checked, indeed.
>> +
>> +module_param_named(gvt_host_high_gm_sz, gvt.host_high_gm_sz, int, 0600);
>> +MODULE_PARM_DESC(gvt_host_high_gm_sz, "Amount of high memory size of host (in MB)");
>> +
>> +module_param_named(gvt_host_fence_sz, gvt.host_fence_sz, int, 0600);
>> +MODULE_PARM_DESC(gvt_host_fence_sz, "Amount of fence size of host (in MB)");
>> diff --git a/drivers/gpu/drm/i915/gvt/params.h b/drivers/gpu/drm/i915/gvt/params.h
>> index d2955b9..f4e9356 100644
>> --- a/drivers/gpu/drm/i915/gvt/params.h
>> +++ b/drivers/gpu/drm/i915/gvt/params.h
>> @@ -27,6 +27,9 @@
>>   struct gvt_kernel_params {
>>   	bool enable;
>>   	int debug;
>> +	int host_low_gm_sz;
>> +	int host_high_gm_sz;
>> +	int host_fence_sz;
>>   };
>>
>>   extern struct gvt_kernel_params gvt;
>> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
>> index 2f897c3..1fd5575 100644
>> --- a/drivers/gpu/drm/i915/i915_drv.h
>> +++ b/drivers/gpu/drm/i915/i915_drv.h
>> @@ -1705,8 +1705,43 @@ struct i915_workarounds {
>>   	u32 hw_whitelist_count[I915_NUM_RINGS];
>>   };
>>
>> +/*
>> + * Under GVT-g, i915 host driver only owned limited graphics resources,
>> + * others are managed by GVT-g resource allocator and kept for other vGPUs.
>> + *
>> + * For graphics memory space partition, a typical layout looks like:
>> + *
>> + * +-------+-----------------------+------+-----------------------+
>> + * |* Host |   *GVT-g Resource     |* Host|   *GVT-g Resource     |
>> + * | Owned |   Allocator Managed   | Owned|   Allocator Managed   |
>> + * |       |                       |      |                       |
>> + * +---------------+-------+----------------------+-------+-------+
>> + * |       |       |       |       |      |       |       |       |
>> + * | i915  | vm 1  | vm 2  | vm 3  | i915 | vm 1  | vm 2  | vm 3  |
>> + * |       |       |       |       |      |       |       |       |
>> + * +-------+-------+-------+--------------+-------+-------+-------+
>> + * |           Aperture            |            Hidden            |
>> + * +-------------------------------+------------------------------+
>> + * |                       GGTT memory space                      |
>> + * +--------------------------------------------------------------+
>> + *
>> + * Similar with fence registers partition:
>> + *
>> + * +------ +-----------------------+
>> + * | * Host|    GVT-g Resource     |
>> + * | Owned |   Allocator Managed   +
>> + * 0       |                       31
>> + * +---------------+-------+-------+
>> + * |       |       |       |       |
>> + * | i915  | vm 1  | vm 2  | vm 3  |
>> + * |       |       |       |       |
>> + * +-------+-------+-------+-------+
>> + */
>>   struct i915_gvt {
>>   	void *pgt_device;
>> +	u32 host_low_gm_sz_in_mb;
>> +	u32 host_high_gm_sz_in_mb;
>> +	int host_fence_sz;
>>   };
>>
>>   struct i915_virtual_gpu {
>> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>> index f68f346..1c0006a 100644
>> --- a/drivers/gpu/drm/i915/i915_gem.c
>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>> @@ -5078,7 +5078,9 @@ i915_gem_load_init(struct drm_device *dev)
>>   	else
>>   		dev_priv->num_fence_regs = 8;
>>
>> -	if (intel_vgpu_active(dev))
>> +	if (intel_gvt_active(dev))
>> +		dev_priv->num_fence_regs = dev_priv->gvt.host_fence_sz;
>> +	else if (intel_vgpu_active(dev))
>>   		dev_priv->num_fence_regs =
>>   				I915_READ(vgtif_reg(avail_rs.fence_num));
>>
>> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
>> index 9127f8f..de09dd4 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
>> @@ -2734,7 +2734,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
>>   	i915_address_space_init(ggtt_vm, dev_priv);
>>   	ggtt_vm->total += PAGE_SIZE;
>>
>> -	if (intel_vgpu_active(dev)) {
>> +	if (intel_vgpu_active(dev) || intel_gvt_active(dev)) {
>>   		ret = intel_vgt_balloon(dev);
>>   		if (ret)
>>   			return ret;
>> @@ -2833,7 +2833,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
>>   	i915_gem_cleanup_stolen(dev);
>>
>>   	if (drm_mm_initialized(&vm->mm)) {
>> -		if (intel_vgpu_active(dev))
>> +		if (intel_vgpu_active(dev) || intel_gvt_active(dev))
>>   			intel_vgt_deballoon();
>>
>>   		drm_mm_takedown(&vm->mm);
>> diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
>> index dea7429..7be1435 100644
>> --- a/drivers/gpu/drm/i915/i915_vgpu.c
>> +++ b/drivers/gpu/drm/i915/i915_vgpu.c
>> @@ -188,10 +188,23 @@ int intel_vgt_balloon(struct drm_device *dev)
>>   	unsigned long unmappable_base, unmappable_size, unmappable_end;
>>   	int ret;
>>
>> -	mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
>> -	mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
>> -	unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
>> -	unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
>> +	if (intel_gvt_active(dev)) {
>> +		mappable_base = 0;
>> +		mappable_size = dev_priv->gvt.host_low_gm_sz_in_mb << 20;
>> +		unmappable_base = dev_priv->gtt.mappable_end;
>> +		unmappable_size = dev_priv->gvt.host_high_gm_sz_in_mb << 20;
>> +	} else if (intel_vgpu_active(dev)) {
>> +		mappable_base = I915_READ(
>> +			vgtif_reg(avail_rs.mappable_gmadr.base));
>> +		mappable_size = I915_READ(
>> +			vgtif_reg(avail_rs.mappable_gmadr.size));
>> +		unmappable_base = I915_READ(
>> +			vgtif_reg(avail_rs.nonmappable_gmadr.base));
>> +		unmappable_size = I915_READ(
>> +			vgtif_reg(avail_rs.nonmappable_gmadr.size));
>> +	} else {
>> +		return -ENODEV;
>> +	}
>>
>>   	mappable_end = mappable_base + mappable_size;
>>   	unmappable_end = unmappable_base + unmappable_size;
Joonas Lahtinen Feb. 23, 2016, 1:26 p.m. UTC | #3
On ti, 2016-02-23 at 15:16 +0200, Joonas Lahtinen wrote:
> > 
> > On to, 2016-02-18 at 19:42 +0800, Zhi Wang wrote:
> > From: Bing Niu <bing.niu@intel.com>
> > 
> > This patch introduces host graphics memory/fence partition when GVT-g
> > is enabled.
> > 
> > Under GVT-g, i915 host driver only owned limited graphics resources,
> > others are managed by GVT-g resource allocator and kept for other vGPUs.
> > 
> > v2:
> > - Address all coding-style comments from Joonas previously.
> > - Fix errors and warnning reported by checkpatch.pl. (Joonas)
> > - Move the graphs into the header files. (Daniel)
> > 
> > Signed-off-by: Bing Niu <bing.niu@intel.com>
> > Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
> > ---
> >  drivers/gpu/drm/i915/gvt/gvt.c      |  4 ++++
> >  drivers/gpu/drm/i915/gvt/params.c   | 12 ++++++++++++
> >  drivers/gpu/drm/i915/gvt/params.h   |  3 +++
> >  drivers/gpu/drm/i915/i915_drv.h     | 35 +++++++++++++++++++++++++++++++++++
> >  drivers/gpu/drm/i915/i915_gem.c     |  4 +++-
> >  drivers/gpu/drm/i915/i915_gem_gtt.c |  4 ++--
> >  drivers/gpu/drm/i915/i915_vgpu.c    | 21 +++++++++++++++++----
> >  7 files changed, 76 insertions(+), 7 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> > index 52cfa32..2099b7e 100644
> > --- a/drivers/gpu/drm/i915/gvt/gvt.c
> > +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> > @@ -348,6 +348,10 @@ void *gvt_create_pgt_device(struct drm_i915_private *dev_priv)
> >  		goto err;
> >  	}
> >  
> > +	dev_priv->gvt.host_fence_sz = gvt.host_fence_sz;
> > +	dev_priv->gvt.host_low_gm_sz_in_mb = gvt.host_low_gm_sz;
> > +	dev_priv->gvt.host_high_gm_sz_in_mb = gvt.host_high_gm_sz;
> 
> I'm thinking, could we expose the pgt_device struct (at least
> partially, and then have a PIMPL pattern), to avoid this kind of
> duplication of declarations and unnecessary copies between i915 and
> i915_gvt modules?
> 
> It's little rough that the gvt driver writes to i915_private struct.
> I'd rather see that gvt.host_fence_sz and other variables get sanitized
> and then written to pgt_device (maybe the public part would be
> i915_pgt_device) and used by gvt and i915 code.
> 

Also, using memparse to handle all kernel memory size parameters is a
good idea (see parse_highmem() or a related function). That is what users
expect.

> Was this ever considered?
> 

<SNIP>

> > diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
> > index dea7429..7be1435 100644
> > --- a/drivers/gpu/drm/i915/i915_vgpu.c
> > +++ b/drivers/gpu/drm/i915/i915_vgpu.c
> > @@ -188,10 +188,23 @@ int intel_vgt_balloon(struct drm_device *dev)
> >  	unsigned long unmappable_base, unmappable_size, unmappable_end;
> >  	int ret;
> >  
> > -	mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
> > -	mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
> > -	unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
> > -	unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
> > +	if (intel_gvt_active(dev)) {
> > +		mappable_base = 0;
> > +		mappable_size = dev_priv->gvt.host_low_gm_sz_in_mb << 20;
> > +		unmappable_base = dev_priv->gtt.mappable_end;
> > +		unmappable_size = dev_priv->gvt.host_high_gm_sz_in_mb << 20;

This could be avoided too.

> > +	} else if (intel_vgpu_active(dev)) {
> > +		mappable_base = I915_READ(
> > +			vgtif_reg(avail_rs.mappable_gmadr.base));
> > +		mappable_size = I915_READ(
> > +			vgtif_reg(avail_rs.mappable_gmadr.size));
> > +		unmappable_base = I915_READ(
> > +			vgtif_reg(avail_rs.nonmappable_gmadr.base));
> > +		unmappable_size = I915_READ(
> > +			vgtif_reg(avail_rs.nonmappable_gmadr.size));
> > +	} else {
> > +		return -ENODEV;
> > +	}
> >  
> >  	mappable_end = mappable_base + mappable_size;
> >  	unmappable_end = unmappable_base + unmappable_size;
Tian, Kevin Feb. 24, 2016, 7:42 a.m. UTC | #4
> From: Wang, Zhi A

> Sent: Tuesday, February 23, 2016 9:23 PM

> >> --- a/drivers/gpu/drm/i915/gvt/gvt.c

> >> +++ b/drivers/gpu/drm/i915/gvt/gvt.c

> >> @@ -348,6 +348,10 @@ void *gvt_create_pgt_device(struct drm_i915_private

> *dev_priv)

> >>   		goto err;

> >>   	}

> >>

> >> +	dev_priv->gvt.host_fence_sz = gvt.host_fence_sz;

> >> +	dev_priv->gvt.host_low_gm_sz_in_mb = gvt.host_low_gm_sz;

> >> +	dev_priv->gvt.host_high_gm_sz_in_mb = gvt.host_high_gm_sz;

> >

> > I'm thinking, could we expose the pgt_device struct (at least

> > partially, and then have a PIMPL pattern), to avoid this kind of

> > duplication of declarations and unnecessary copies between i915 and

> > i915_gvt modules?

> >

> > It's little rough that the gvt driver writes to i915_private struct.

> > I'd rather see that gvt.host_fence_sz and other variables get sanitized

> > and then written to pgt_device (maybe the public part would be

> > i915_pgt_device) and used by gvt and i915 code.

> >

> > Was this ever considered?

> >

> The previous version do something similar like that, both i915 and gvt

> reads GVT module kernel parameter but considered that GVT modules could

> be configured as "n" in kernel configuration, probably we should let

> i915 to store this information and GVT to configure it if GVT is active?


Agree with Joonas. We don't need another gvt wrap. Let's just expose
pgt_device directly. I believe all other information can be encapsulated
under pgt_device.

btw, to match other descriptions in the code, would it be clearer to rename
pgt_device to gvt_device?

Another minor thing needs Joonas' feedback: is it usual to group
all module parameters belonging to one feature together in a struct
(like 'gvt' in this patch), or just to leave them directly exposed?

Thanks
Kevin
Tian, Kevin Feb. 24, 2016, 8:22 a.m. UTC | #5
> From: Wang, Zhi A
> Sent: Thursday, February 18, 2016 7:42 PM
> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
> index 52cfa32..2099b7e 100644
> --- a/drivers/gpu/drm/i915/gvt/gvt.c
> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> @@ -348,6 +348,10 @@ void *gvt_create_pgt_device(struct drm_i915_private
> *dev_priv)
>  		goto err;
>  	}
> 
> +	dev_priv->gvt.host_fence_sz = gvt.host_fence_sz;
> +	dev_priv->gvt.host_low_gm_sz_in_mb = gvt.host_low_gm_sz;
> +	dev_priv->gvt.host_high_gm_sz_in_mb = gvt.host_high_gm_sz;

Do we need a hard limit on the fence number for host usage here? There is no
continuity requirement as there is for graphics memory, since we do translate
the fence# between the guest view and the host view. So we could make it flexible
as an on-demand allocation when creating a vGPU. Daniel even mentioned,
iirc, that today i915 can dynamically grab a fence register away from
an application, which could be useful even when host fence usage is high
(not a typical case in server virtualization, which runs few applications in the host).

> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c
> b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 9127f8f..de09dd4 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -2734,7 +2734,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
>  	i915_address_space_init(ggtt_vm, dev_priv);
>  	ggtt_vm->total += PAGE_SIZE;
> 
> -	if (intel_vgpu_active(dev)) {
> +	if (intel_vgpu_active(dev) || intel_gvt_active(dev)) {

The above two conditions are a bit confusing for others not familiar with 
this technology: vgpu_active is for the driver running in a VM, while
gvt_active is for the driver running in the host. Could we introduce a better 
name, or at least wrap them into a more meaningful macro like
intel_ballooning_required?

>  		ret = intel_vgt_balloon(dev);

I saw several comments on whether ballooning is the right term here,
since we only do static reservation so far. How about renaming it
to intel_reserve_gm_resource to be clearer? In the future, even
when we want to add a true dynamic ballooning feature, it will be
largely refactored anyway. :-)

Thanks
Kevin
Joonas Lahtinen Feb. 25, 2016, 1:13 p.m. UTC | #6
On ke, 2016-02-24 at 07:42 +0000, Tian, Kevin wrote:
> > From: Wang, Zhi A
> > Sent: Tuesday, February 23, 2016 9:23 PM
> > > > --- a/drivers/gpu/drm/i915/gvt/gvt.c
> > > > +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> > > > @@ -348,6 +348,10 @@ void *gvt_create_pgt_device(struct drm_i915_private
> > *dev_priv)
> > > >   		goto err;
> > > >   	}
> > > > 
> > > > +	dev_priv->gvt.host_fence_sz = gvt.host_fence_sz;
> > > > +	dev_priv->gvt.host_low_gm_sz_in_mb = gvt.host_low_gm_sz;
> > > > +	dev_priv->gvt.host_high_gm_sz_in_mb = gvt.host_high_gm_sz;
> > > 
> > > I'm thinking, could we expose the pgt_device struct (at least
> > > partially, and then have a PIMPL pattern), to avoid this kind of
> > > duplication of declarations and unnecessary copies between i915 and
> > > i915_gvt modules?
> > > 
> > > It's little rough that the gvt driver writes to i915_private struct.
> > > I'd rather see that gvt.host_fence_sz and other variables get sanitized
> > > and then written to pgt_device (maybe the public part would be
> > > i915_pgt_device) and used by gvt and i915 code.
> > > 
> > > Was this ever considered?
> > > 
> > The previous version do something similar like that, both i915 and gvt
> > reads GVT module kernel parameter but considered that GVT modules could
> > be configured as "n" in kernel configuration, probably we should let
> > i915 to store this information and GVT to configure it if GVT is active?
> 
> Agree with Joonas. We don't need another gvt wrap. Let's just expose
> pgt_device directly. I believe all other information can be encapsulated
> under pgt_device.
> 
> btw to match other description in the code, is it clear to rename pgt_device
> to gvt_device?
> 
> Another minor thing needs Joonas' feedback. Is it usual to capture
> all module parameters belonging to one feature structurized together
> (like 'gvt' in this patch), or just to leave them directly exposed?
> 

I think it's a good idea to group them as they're currently grouped.

Regards, Joonas

> Thanks
> Kevin
Wang, Zhi A Feb. 26, 2016, 5:21 a.m. UTC | #7
On 02/24/16 15:42, Tian, Kevin wrote:
>> From: Wang, Zhi A
>> Sent: Tuesday, February 23, 2016 9:23 PM
>>>> --- a/drivers/gpu/drm/i915/gvt/gvt.c
>>>> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
>>>> @@ -348,6 +348,10 @@ void *gvt_create_pgt_device(struct drm_i915_private
>> *dev_priv)
>>>>    		goto err;
>>>>    	}
>>>>
>>>> +	dev_priv->gvt.host_fence_sz = gvt.host_fence_sz;
>>>> +	dev_priv->gvt.host_low_gm_sz_in_mb = gvt.host_low_gm_sz;
>>>> +	dev_priv->gvt.host_high_gm_sz_in_mb = gvt.host_high_gm_sz;
>>>
>>> I'm thinking, could we expose the pgt_device struct (at least
>>> partially, and then have a PIMPL pattern), to avoid this kind of
>>> duplication of declarations and unnecessary copies between i915 and
>>> i915_gvt modules?
>>>
>>> It's little rough that the gvt driver writes to i915_private struct.
>>> I'd rather see that gvt.host_fence_sz and other variables get sanitized
>>> and then written to pgt_device (maybe the public part would be
>>> i915_pgt_device) and used by gvt and i915 code.
>>>
>>> Was this ever considered?
>>>
>> The previous version do something similar like that, both i915 and gvt
>> reads GVT module kernel parameter but considered that GVT modules could
>> be configured as "n" in kernel configuration, probably we should let
>> i915 to store this information and GVT to configure it if GVT is active?
>
> Agree with Joonas. We don't need another gvt wrap. Let's just expose
> pgt_device directly. I believe all other information can be encapsulated
> under pgt_device.
>
How about this scheme:

1. Move the GVT kernel parameters into intel_gvt.{h, c}
2. Sanitize the partition configuration for the host in intel_gvt.c
3. If CONFIG_DRM_I915_GVT = y, write the configuration into pgt_device 
to inform the GVT resource allocator of the ranges owned by the host

> btw to match other description in the code, is it clear to rename pgt_device
> to gvt_device?
>

For the name of the GVT physical device, if we use "gvt_device", wouldn't it look a 
bit weird when both "gvt_device" and "vgt_device" (vGPU instance) 
appear in our code? :( And "pgpu" and "vgpu" also look weird...

> Another minor thing needs Joonas' feedback. Is it usual to capture
> all module parameters belonging to one feature structurized together
> (like 'gvt' in this patch), or just to leave them directly exposed?
>


> Thanks
> Kevin
>
Wang, Zhi A Feb. 26, 2016, 5:29 a.m. UTC | #8
On 02/24/16 16:22, Tian, Kevin wrote:
>> From: Wang, Zhi A
>> Sent: Thursday, February 18, 2016 7:42 PM
>> diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
>> index 52cfa32..2099b7e 100644
>> --- a/drivers/gpu/drm/i915/gvt/gvt.c
>> +++ b/drivers/gpu/drm/i915/gvt/gvt.c
>> @@ -348,6 +348,10 @@ void *gvt_create_pgt_device(struct drm_i915_private
>> *dev_priv)
>>   		goto err;
>>   	}
>>
>> +	dev_priv->gvt.host_fence_sz = gvt.host_fence_sz;
>> +	dev_priv->gvt.host_low_gm_sz_in_mb = gvt.host_low_gm_sz;
>> +	dev_priv->gvt.host_high_gm_sz_in_mb = gvt.host_high_gm_sz;
>
> Do we need to hard-limit the fence number for host usage here? There is no
> continuity requirement as seen for graphics memory, since we do translate
> fence# between the guest view and the host view. So we could make it flexible
> as an on-demand allocation when creating a vGPU. Daniel even mentioned,
> IIRC, that today i915 can dynamically grab a fence register away from
> an application, which could be useful even when host fence usage is high
> (not a typical case in server virtualization, which runs few applications in the host).
>
Yes. They steal fences in the aperture mapping fault handler. We could steal 
the fence registers from i915 as well. Let me look into the effort here. :)

>> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c
>> b/drivers/gpu/drm/i915/i915_gem_gtt.c
>> index 9127f8f..de09dd4 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
>> @@ -2734,7 +2734,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
>>   	i915_address_space_init(ggtt_vm, dev_priv);
>>   	ggtt_vm->total += PAGE_SIZE;
>>
>> -	if (intel_vgpu_active(dev)) {
>> +	if (intel_vgpu_active(dev) || intel_gvt_active(dev)) {
>
> The above two conditions are a bit confusing for others not familiar with
> this technology. vgpu_active is for the driver running in a VM, while
> gvt_active is for the driver running in the host. Could we introduce better
> names, or at least wrap them into a more meaningful macro like
> intel_ballooning_required?
>
>>   		ret = intel_vgt_balloon(dev);
>
> I saw several comments on whether ballooning is the right term here,
> since we only do static reservation so far. How about renaming it
> to intel_reserve_gm_resource to be clearer? In the future, even
> when we want to add a true dynamic ballooning feature, it will be
> largely refactored anyway. :-)
>
Sure, will do that.
> Thanks
> Kevin
>
Joonas Lahtinen Feb. 26, 2016, 1:54 p.m. UTC | #9
Hi,

On pe, 2016-02-26 at 13:21 +0800, Zhi Wang wrote:
> 
> On 02/24/16 15:42, Tian, Kevin wrote:
> > 
> > > 
> > > From: Wang, Zhi A
> > > Sent: Tuesday, February 23, 2016 9:23 PM
> > > > 
> > > > > 
> > > > > --- a/drivers/gpu/drm/i915/gvt/gvt.c
> > > > > +++ b/drivers/gpu/drm/i915/gvt/gvt.c
> > > > > @@ -348,6 +348,10 @@ void *gvt_create_pgt_device(struct drm_i915_private
> > > *dev_priv)
> > > > 
> > > > > 
> > > > >    		goto err;
> > > > >    	}
> > > > > 
> > > > > +	dev_priv->gvt.host_fence_sz = gvt.host_fence_sz;
> > > > > +	dev_priv->gvt.host_low_gm_sz_in_mb = gvt.host_low_gm_sz;
> > > > > +	dev_priv->gvt.host_high_gm_sz_in_mb = gvt.host_high_gm_sz;
> > > > I'm thinking, could we expose the pgt_device struct (at least
> > > > partially, and then have a PIMPL pattern), to avoid this kind of
> > > > duplication of declarations and unnecessary copies between i915 and
> > > > i915_gvt modules?
> > > > 
> > > > It's little rough that the gvt driver writes to i915_private struct.
> > > > I'd rather see that gvt.host_fence_sz and other variables get sanitized
> > > > and then written to pgt_device (maybe the public part would be
> > > > i915_pgt_device) and used by gvt and i915 code.
> > > > 
> > > > Was this ever considered?
> > > > 
> > > The previous version do something similar like that, both i915 and gvt
> > > reads GVT module kernel parameter but considered that GVT modules could
> > > be configured as "n" in kernel configuration, probably we should let
> > > i915 to store this information and GVT to configure it if GVT is active?
> > Agree with Joonas. We don't need another gvt wrap. Let's just expose
> > pgt_device directly. I believe all other information can be encapsulated
> > under pgt_device.
> > 
> How about this scheme:
> 
> 1. Move GVT kernel parameter into intel_gvt.{h, c}
> 2. Sanitize the partition configuration for host in intel_gvt.c
> 3. If CONFIG_DRM_I915_GVT = y, write the configuration into pgt_device 
> to inform GVT resource allocator ranges owned by host
> 

This sounds fine: if the i915 driver wants to know about the gvt driver, it
will read the gvt structure (if gvt is enabled), instead of the gvt driver
pushing information to i915.

> > 
> > btw to match other description in the code, is it clear to rename pgt_device
> > to gvt_device?
> > 
> For the name of GVT physical device, if we use "gvt_device", it looks a 
> bit weird when both "gvt_device" and "vgt_device"(vGPU instance) 
> appeared in our code? :( And "pgpu" and "vgpu" also looks weird...
> 

The naming conventions are indeed very confusing; it would help if all
host-related stuff were named gvt_* and the client side vgpu_*.

Regards, Joonas

> > 
> > Another minor thing needs Joonas' feedback. Is it usual to capture
> > all module parameters belonging to one feature structurized together
> > (like 'gvt' in this patch), or just to leave them directly exposed?
> > 
> 
> > 
> > Thanks
> > Kevin
> >
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 52cfa32..2099b7e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -348,6 +348,10 @@  void *gvt_create_pgt_device(struct drm_i915_private *dev_priv)
 		goto err;
 	}
 
+	dev_priv->gvt.host_fence_sz = gvt.host_fence_sz;
+	dev_priv->gvt.host_low_gm_sz_in_mb = gvt.host_low_gm_sz;
+	dev_priv->gvt.host_high_gm_sz_in_mb = gvt.host_high_gm_sz;
+
 	gvt_dbg_core("pgt device creation done, id %d", pdev->id);
 
 	return pdev;
diff --git a/drivers/gpu/drm/i915/gvt/params.c b/drivers/gpu/drm/i915/gvt/params.c
index d381d17..75195fd 100644
--- a/drivers/gpu/drm/i915/gvt/params.c
+++ b/drivers/gpu/drm/i915/gvt/params.c
@@ -26,7 +26,19 @@ 
 struct gvt_kernel_params gvt = {
 	.enable = false,
 	.debug = 0,
+	.host_low_gm_sz = 96,	/* in MB */
+	.host_high_gm_sz = 384, /* in MB */
+	.host_fence_sz = 4,
 };
 
 module_param_named(gvt_enable, gvt.enable, bool, 0600);
 MODULE_PARM_DESC(gvt_enable, "Enable Intel GVT-g host support");
+
+module_param_named(gvt_host_low_gm_sz, gvt.host_low_gm_sz, int, 0600);
+MODULE_PARM_DESC(gvt_host_low_gm_sz, "Amount of aperture size of host (in MB)");
+
+module_param_named(gvt_host_high_gm_sz, gvt.host_high_gm_sz, int, 0600);
+MODULE_PARM_DESC(gvt_host_high_gm_sz, "Amount of high memory size of host (in MB)");
+
+module_param_named(gvt_host_fence_sz, gvt.host_fence_sz, int, 0600);
+MODULE_PARM_DESC(gvt_host_fence_sz, "Amount of fence size of host (in MB)");
diff --git a/drivers/gpu/drm/i915/gvt/params.h b/drivers/gpu/drm/i915/gvt/params.h
index d2955b9..f4e9356 100644
--- a/drivers/gpu/drm/i915/gvt/params.h
+++ b/drivers/gpu/drm/i915/gvt/params.h
@@ -27,6 +27,9 @@ 
 struct gvt_kernel_params {
 	bool enable;
 	int debug;
+	int host_low_gm_sz;
+	int host_high_gm_sz;
+	int host_fence_sz;
 };
 
 extern struct gvt_kernel_params gvt;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2f897c3..1fd5575 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1705,8 +1705,43 @@  struct i915_workarounds {
 	u32 hw_whitelist_count[I915_NUM_RINGS];
 };
 
+/*
+ * Under GVT-g, i915 host driver only owned limited graphics resources,
+ * others are managed by GVT-g resource allocator and kept for other vGPUs.
+ *
+ * For graphics memory space partition, a typical layout looks like:
+ *
+ * +-------+-----------------------+------+-----------------------+
+ * |* Host |   *GVT-g Resource     |* Host|   *GVT-g Resource     |
+ * | Owned |   Allocator Managed   | Owned|   Allocator Managed   |
+ * |       |                       |      |                       |
+ * +---------------+-------+----------------------+-------+-------+
+ * |       |       |       |       |      |       |       |       |
+ * | i915  | vm 1  | vm 2  | vm 3  | i915 | vm 1  | vm 2  | vm 3  |
+ * |       |       |       |       |      |       |       |       |
+ * +-------+-------+-------+--------------+-------+-------+-------+
+ * |           Aperture            |            Hidden            |
+ * +-------------------------------+------------------------------+
+ * |                       GGTT memory space                      |
+ * +--------------------------------------------------------------+
+ *
+ * Similar with fence registers partition:
+ *
+ * +------ +-----------------------+
+ * | * Host|    GVT-g Resource     |
+ * | Owned |   Allocator Managed   +
+ * 0       |                       31
+ * +---------------+-------+-------+
+ * |       |       |       |       |
+ * | i915  | vm 1  | vm 2  | vm 3  |
+ * |       |       |       |       |
+ * +-------+-------+-------+-------+
+ */
 struct i915_gvt {
 	void *pgt_device;
+	u32 host_low_gm_sz_in_mb;
+	u32 host_high_gm_sz_in_mb;
+	int host_fence_sz;
 };
 
 struct i915_virtual_gpu {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f68f346..1c0006a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5078,7 +5078,9 @@  i915_gem_load_init(struct drm_device *dev)
 	else
 		dev_priv->num_fence_regs = 8;
 
-	if (intel_vgpu_active(dev))
+	if (intel_gvt_active(dev))
+		dev_priv->num_fence_regs = dev_priv->gvt.host_fence_sz;
+	else if (intel_vgpu_active(dev))
 		dev_priv->num_fence_regs =
 				I915_READ(vgtif_reg(avail_rs.fence_num));
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9127f8f..de09dd4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2734,7 +2734,7 @@  static int i915_gem_setup_global_gtt(struct drm_device *dev,
 	i915_address_space_init(ggtt_vm, dev_priv);
 	ggtt_vm->total += PAGE_SIZE;
 
-	if (intel_vgpu_active(dev)) {
+	if (intel_vgpu_active(dev) || intel_gvt_active(dev)) {
 		ret = intel_vgt_balloon(dev);
 		if (ret)
 			return ret;
@@ -2833,7 +2833,7 @@  void i915_global_gtt_cleanup(struct drm_device *dev)
 	i915_gem_cleanup_stolen(dev);
 
 	if (drm_mm_initialized(&vm->mm)) {
-		if (intel_vgpu_active(dev))
+		if (intel_vgpu_active(dev) || intel_gvt_active(dev))
 			intel_vgt_deballoon();
 
 		drm_mm_takedown(&vm->mm);
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dea7429..7be1435 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -188,10 +188,23 @@  int intel_vgt_balloon(struct drm_device *dev)
 	unsigned long unmappable_base, unmappable_size, unmappable_end;
 	int ret;
 
-	mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
-	mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
-	unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
-	unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size));
+	if (intel_gvt_active(dev)) {
+		mappable_base = 0;
+		mappable_size = dev_priv->gvt.host_low_gm_sz_in_mb << 20;
+		unmappable_base = dev_priv->gtt.mappable_end;
+		unmappable_size = dev_priv->gvt.host_high_gm_sz_in_mb << 20;
+	} else if (intel_vgpu_active(dev)) {
+		mappable_base = I915_READ(
+			vgtif_reg(avail_rs.mappable_gmadr.base));
+		mappable_size = I915_READ(
+			vgtif_reg(avail_rs.mappable_gmadr.size));
+		unmappable_base = I915_READ(
+			vgtif_reg(avail_rs.nonmappable_gmadr.base));
+		unmappable_size = I915_READ(
+			vgtif_reg(avail_rs.nonmappable_gmadr.size));
+	} else {
+		return -ENODEV;
+	}
 
 	mappable_end = mappable_base + mappable_size;
 	unmappable_end = unmappable_base + unmappable_size;