diff mbox series

[1/2] drm/i915: Factor out i915_ggtt_suspend_vm/i915_ggtt_resume_vm()

Message ID 20211101183551.3580546-1-imre.deak@intel.com (mailing list archive)
State New, archived
Headers show
Series [1/2] drm/i915: Factor out i915_ggtt_suspend_vm/i915_ggtt_resume_vm() | expand

Commit Message

Imre Deak Nov. 1, 2021, 6:35 p.m. UTC
Factor out functions that are needed by the next patch to suspend/resume
the memory mappings for DPT FBs.

No functional change, except that during suspend the
ggtt->invalidate(ggtt) call is reordered wrt. atomic_set(&ggtt->vm.open, open)
and mutex_unlock(&ggtt->vm.mutex). This shouldn't matter due to the i915
suspend sequence being single threaded.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_ggtt.c | 71 +++++++++++++++++++++-------
 drivers/gpu/drm/i915/gt/intel_gtt.h  |  2 +
 2 files changed, 56 insertions(+), 17 deletions(-)

Comments

Ville Syrjälä Nov. 2, 2021, 2:15 p.m. UTC | #1
On Mon, Nov 01, 2021 at 08:35:50PM +0200, Imre Deak wrote:
> Factor out functions that are needed by the next patch to suspend/resume
> the memory mappings for DPT FBs.
> 
> No functional change, except reordering during suspend the
> ggtt->invalidate(ggtt) call wrt. atomic_set(&ggtt->vm.open, open) and
> mutex_unlock(&ggtt->vm.mutex). This shouldn't matter due to the i915
> suspend sequence being single threaded.
> 
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
> Signed-off-by: Imre Deak <imre.deak@intel.com>
> ---
>  drivers/gpu/drm/i915/gt/intel_ggtt.c | 71 +++++++++++++++++++++-------
>  drivers/gpu/drm/i915/gt/intel_gtt.h  |  2 +
>  2 files changed, 56 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> index f17383e76eb71..834dc1b6a0729 100644
> --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
> +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> @@ -116,17 +116,26 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
>  	return false;
>  }
>  
> -void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> +/**
> + * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
> + * @vm: The VM to suspend the mappings for
> + *
> + * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
> + * DPT page table.
> + */
> +void i915_ggtt_suspend_vm(struct i915_address_space *vm)
>  {
>  	struct i915_vma *vma, *vn;
>  	int open;
>  
> -	mutex_lock(&ggtt->vm.mutex);
> +	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
> +
> +	mutex_lock(&vm->mutex);
>  
>  	/* Skip rewriting PTE on VMA unbind. */
> -	open = atomic_xchg(&ggtt->vm.open, 0);
> +	open = atomic_xchg(&vm->open, 0);
>  
> -	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
> +	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
>  		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
>  		i915_vma_wait_for_bind(vma);
>  
> @@ -139,11 +148,17 @@ void i915_ggtt_suspend(struct i915_ggtt *ggtt)
>  		}
>  	}
>  
> -	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
> +	vm->clear_range(vm, 0, vm->total);
> +
> +	atomic_set(&vm->open, open);
> +
> +	mutex_unlock(&vm->mutex);
> +}
> +
> +void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> +{
> +	i915_ggtt_suspend_vm(&ggtt->vm);
>  	ggtt->invalidate(ggtt);
> -	atomic_set(&ggtt->vm.open, open);
> -
> -	mutex_unlock(&ggtt->vm.mutex);
>  
>  	intel_gt_check_and_clear_faults(ggtt->vm.gt);
>  }
> @@ -1253,37 +1268,59 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
>  	ggtt->invalidate(ggtt);
>  }
>  
> -void i915_ggtt_resume(struct i915_ggtt *ggtt)
> +/**
> + * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
> + * @vm: The VM to restore the mappings for
> + *
> + * Restore the memory mappings for all objects mapped to HW via the GGTT or a
> + * DPT page table.
> + *
> + * Returns %true if restoring the mapping for any object that was in a write
> + * domain before suspend.
> + */
> +bool i915_ggtt_resume_vm(struct i915_address_space *vm)
>  {
>  	struct i915_vma *vma;
> -	bool flush = false;
> +	bool write_domain_objs = false;
>  	int open;
>  
> -	intel_gt_check_and_clear_faults(ggtt->vm.gt);
> +	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
>  
>  	/* First fill our portion of the GTT with scratch pages */
> -	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
> +	vm->clear_range(vm, 0, vm->total);
>  
>  	/* Skip rewriting PTE on VMA unbind. */
> -	open = atomic_xchg(&ggtt->vm.open, 0);
> +	open = atomic_xchg(&vm->open, 0);
>  
>  	/* clflush objects bound into the GGTT and rebind them. */
> -	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
> +	list_for_each_entry(vma, &vm->bound_list, vm_link) {
>  		struct drm_i915_gem_object *obj = vma->obj;
>  		unsigned int was_bound =
>  			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
>  
>  		GEM_BUG_ON(!was_bound);
> -		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
> +		vma->ops->bind_vma(vm, NULL, vma,
>  				   obj ? obj->cache_level : 0,
>  				   was_bound);

Can we even get here with DPT? Ie. shouldn't everything have been 
thrown out during suspend? Maybe we should WARN if any DPT stuff
is still bound during resume?

Other than that this looks good to me. Series is
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>

>  		if (obj) { /* only used during resume => exclusive access */
> -			flush |= fetch_and_zero(&obj->write_domain);
> +			write_domain_objs |= fetch_and_zero(&obj->write_domain);
>  			obj->read_domains |= I915_GEM_DOMAIN_GTT;
>  		}
>  	}
>  
> -	atomic_set(&ggtt->vm.open, open);
> +	atomic_set(&vm->open, open);
> +
> +	return write_domain_objs;
> +}
> +
> +void i915_ggtt_resume(struct i915_ggtt *ggtt)
> +{
> +	bool flush;
> +
> +	intel_gt_check_and_clear_faults(ggtt->vm.gt);
> +
> +	flush = i915_ggtt_resume_vm(&ggtt->vm);
> +
>  	ggtt->invalidate(ggtt);
>  
>  	if (flush)
> diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
> index bc67502633599..dfeaef680aacd 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gtt.h
> +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
> @@ -544,6 +544,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
>  struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
>  				     unsigned long lmem_pt_obj_flags);
>  
> +void i915_ggtt_suspend_vm(struct i915_address_space *vm);
> +bool i915_ggtt_resume_vm(struct i915_address_space *vm);
>  void i915_ggtt_suspend(struct i915_ggtt *gtt);
>  void i915_ggtt_resume(struct i915_ggtt *ggtt);
>  
> -- 
> 2.27.0
Imre Deak Nov. 2, 2021, 2:32 p.m. UTC | #2
On Tue, Nov 02, 2021 at 04:15:18PM +0200, Ville Syrjälä wrote:
> On Mon, Nov 01, 2021 at 08:35:50PM +0200, Imre Deak wrote:
> > Factor out functions that are needed by the next patch to suspend/resume
> > the memory mappings for DPT FBs.
> > 
> > No functional change, except reordering during suspend the
> > ggtt->invalidate(ggtt) call wrt. atomic_set(&ggtt->vm.open, open) and
> > mutex_unlock(&ggtt->vm.mutex). This shouldn't matter due to the i915
> > suspend sequence being single threaded.
> > 
> > Cc: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
> > Signed-off-by: Imre Deak <imre.deak@intel.com>
> > ---
> >  drivers/gpu/drm/i915/gt/intel_ggtt.c | 71 +++++++++++++++++++++-------
> >  drivers/gpu/drm/i915/gt/intel_gtt.h  |  2 +
> >  2 files changed, 56 insertions(+), 17 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> > index f17383e76eb71..834dc1b6a0729 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> > @@ -116,17 +116,26 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
> >  	return false;
> >  }
> >  
> > -void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> > +/**
> > + * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
> > + * @vm: The VM to suspend the mappings for
> > + *
> > + * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
> > + * DPT page table.
> > + */
> > +void i915_ggtt_suspend_vm(struct i915_address_space *vm)
> >  {
> >  	struct i915_vma *vma, *vn;
> >  	int open;
> >  
> > -	mutex_lock(&ggtt->vm.mutex);
> > +	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
> > +
> > +	mutex_lock(&vm->mutex);
> >  
> >  	/* Skip rewriting PTE on VMA unbind. */
> > -	open = atomic_xchg(&ggtt->vm.open, 0);
> > +	open = atomic_xchg(&vm->open, 0);
> >  
> > -	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
> > +	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
> >  		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
> >  		i915_vma_wait_for_bind(vma);
> >  
> > @@ -139,11 +148,17 @@ void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> >  		}
> >  	}
> >  
> > -	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
> > +	vm->clear_range(vm, 0, vm->total);
> > +
> > +	atomic_set(&vm->open, open);
> > +
> > +	mutex_unlock(&vm->mutex);
> > +}
> > +
> > +void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> > +{
> > +	i915_ggtt_suspend_vm(&ggtt->vm);
> >  	ggtt->invalidate(ggtt);
> > -	atomic_set(&ggtt->vm.open, open);
> > -
> > -	mutex_unlock(&ggtt->vm.mutex);
> >  
> >  	intel_gt_check_and_clear_faults(ggtt->vm.gt);
> >  }
> > @@ -1253,37 +1268,59 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
> >  	ggtt->invalidate(ggtt);
> >  }
> >  
> > -void i915_ggtt_resume(struct i915_ggtt *ggtt)
> > +/**
> > + * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
> > + * @vm: The VM to restore the mappings for
> > + *
> > + * Restore the memory mappings for all objects mapped to HW via the GGTT or a
> > + * DPT page table.
> > + *
> > + * Returns %true if restoring the mapping for any object that was in a write
> > + * domain before suspend.
> > + */
> > +bool i915_ggtt_resume_vm(struct i915_address_space *vm)
> >  {
> >  	struct i915_vma *vma;
> > -	bool flush = false;
> > +	bool write_domain_objs = false;
> >  	int open;
> >  
> > -	intel_gt_check_and_clear_faults(ggtt->vm.gt);
> > +	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
> >  
> >  	/* First fill our portion of the GTT with scratch pages */
> > -	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
> > +	vm->clear_range(vm, 0, vm->total);
> >  
> >  	/* Skip rewriting PTE on VMA unbind. */
> > -	open = atomic_xchg(&ggtt->vm.open, 0);
> > +	open = atomic_xchg(&vm->open, 0);
> >  
> >  	/* clflush objects bound into the GGTT and rebind them. */
> > -	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
> > +	list_for_each_entry(vma, &vm->bound_list, vm_link) {
> >  		struct drm_i915_gem_object *obj = vma->obj;
> >  		unsigned int was_bound =
> >  			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
> >  
> >  		GEM_BUG_ON(!was_bound);
> > -		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
> > +		vma->ops->bind_vma(vm, NULL, vma,
> >  				   obj ? obj->cache_level : 0,
> >  				   was_bound);
> 
> Can we even get here with DPT? Ie. shouldn't everything have been 
> thrown out during suspend?

After calling vma->ops->unbind_vma() the DPT object still remains on the
vm->bound_list; unbind_vma() is only supposed to clear the PTE entries
(atm we're not doing that for DPT). Here we re-instate the PTEs for the
same objects still on the bound_list.

> Maybe we should WARN if any DPT stuff is still bound during resume?
> 
> Other than that this looks good to me. Series is
> Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
> 
> >  		if (obj) { /* only used during resume => exclusive access */
> > -			flush |= fetch_and_zero(&obj->write_domain);
> > +			write_domain_objs |= fetch_and_zero(&obj->write_domain);
> >  			obj->read_domains |= I915_GEM_DOMAIN_GTT;
> >  		}
> >  	}
> >  
> > -	atomic_set(&ggtt->vm.open, open);
> > +	atomic_set(&vm->open, open);
> > +
> > +	return write_domain_objs;
> > +}
> > +
> > +void i915_ggtt_resume(struct i915_ggtt *ggtt)
> > +{
> > +	bool flush;
> > +
> > +	intel_gt_check_and_clear_faults(ggtt->vm.gt);
> > +
> > +	flush = i915_ggtt_resume_vm(&ggtt->vm);
> > +
> >  	ggtt->invalidate(ggtt);
> >  
> >  	if (flush)
> > diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
> > index bc67502633599..dfeaef680aacd 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_gtt.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
> > @@ -544,6 +544,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
> >  struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
> >  				     unsigned long lmem_pt_obj_flags);
> >  
> > +void i915_ggtt_suspend_vm(struct i915_address_space *vm);
> > +bool i915_ggtt_resume_vm(struct i915_address_space *vm);
> >  void i915_ggtt_suspend(struct i915_ggtt *gtt);
> >  void i915_ggtt_resume(struct i915_ggtt *ggtt);
> >  
> > -- 
> > 2.27.0
> 
> -- 
> Ville Syrjälä
> Intel
Ville Syrjälä Nov. 2, 2021, 2:40 p.m. UTC | #3
On Tue, Nov 02, 2021 at 04:32:45PM +0200, Imre Deak wrote:
> On Tue, Nov 02, 2021 at 04:15:18PM +0200, Ville Syrjälä wrote:
> > On Mon, Nov 01, 2021 at 08:35:50PM +0200, Imre Deak wrote:
> > > Factor out functions that are needed by the next patch to suspend/resume
> > > the memory mappings for DPT FBs.
> > > 
> > > No functional change, except reordering during suspend the
> > > ggtt->invalidate(ggtt) call wrt. atomic_set(&ggtt->vm.open, open) and
> > > mutex_unlock(&ggtt->vm.mutex). This shouldn't matter due to the i915
> > > suspend sequence being single threaded.
> > > 
> > > Cc: Chris Wilson <chris@chris-wilson.co.uk>
> > > Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
> > > Signed-off-by: Imre Deak <imre.deak@intel.com>
> > > ---
> > >  drivers/gpu/drm/i915/gt/intel_ggtt.c | 71 +++++++++++++++++++++-------
> > >  drivers/gpu/drm/i915/gt/intel_gtt.h  |  2 +
> > >  2 files changed, 56 insertions(+), 17 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> > > index f17383e76eb71..834dc1b6a0729 100644
> > > --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
> > > +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
> > > @@ -116,17 +116,26 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
> > >  	return false;
> > >  }
> > >  
> > > -void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> > > +/**
> > > + * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
> > > + * @vm: The VM to suspend the mappings for
> > > + *
> > > + * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
> > > + * DPT page table.
> > > + */
> > > +void i915_ggtt_suspend_vm(struct i915_address_space *vm)
> > >  {
> > >  	struct i915_vma *vma, *vn;
> > >  	int open;
> > >  
> > > -	mutex_lock(&ggtt->vm.mutex);
> > > +	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
> > > +
> > > +	mutex_lock(&vm->mutex);
> > >  
> > >  	/* Skip rewriting PTE on VMA unbind. */
> > > -	open = atomic_xchg(&ggtt->vm.open, 0);
> > > +	open = atomic_xchg(&vm->open, 0);
> > >  
> > > -	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
> > > +	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
> > >  		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
> > >  		i915_vma_wait_for_bind(vma);
> > >  
> > > @@ -139,11 +148,17 @@ void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> > >  		}
> > >  	}
> > >  
> > > -	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
> > > +	vm->clear_range(vm, 0, vm->total);
> > > +
> > > +	atomic_set(&vm->open, open);
> > > +
> > > +	mutex_unlock(&vm->mutex);
> > > +}
> > > +
> > > +void i915_ggtt_suspend(struct i915_ggtt *ggtt)
> > > +{
> > > +	i915_ggtt_suspend_vm(&ggtt->vm);
> > >  	ggtt->invalidate(ggtt);
> > > -	atomic_set(&ggtt->vm.open, open);
> > > -
> > > -	mutex_unlock(&ggtt->vm.mutex);
> > >  
> > >  	intel_gt_check_and_clear_faults(ggtt->vm.gt);
> > >  }
> > > @@ -1253,37 +1268,59 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
> > >  	ggtt->invalidate(ggtt);
> > >  }
> > >  
> > > -void i915_ggtt_resume(struct i915_ggtt *ggtt)
> > > +/**
> > > + * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
> > > + * @vm: The VM to restore the mappings for
> > > + *
> > > + * Restore the memory mappings for all objects mapped to HW via the GGTT or a
> > > + * DPT page table.
> > > + *
> > > + * Returns %true if restoring the mapping for any object that was in a write
> > > + * domain before suspend.
> > > + */
> > > +bool i915_ggtt_resume_vm(struct i915_address_space *vm)
> > >  {
> > >  	struct i915_vma *vma;
> > > -	bool flush = false;
> > > +	bool write_domain_objs = false;
> > >  	int open;
> > >  
> > > -	intel_gt_check_and_clear_faults(ggtt->vm.gt);
> > > +	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
> > >  
> > >  	/* First fill our portion of the GTT with scratch pages */
> > > -	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
> > > +	vm->clear_range(vm, 0, vm->total);
> > >  
> > >  	/* Skip rewriting PTE on VMA unbind. */
> > > -	open = atomic_xchg(&ggtt->vm.open, 0);
> > > +	open = atomic_xchg(&vm->open, 0);
> > >  
> > >  	/* clflush objects bound into the GGTT and rebind them. */
> > > -	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
> > > +	list_for_each_entry(vma, &vm->bound_list, vm_link) {
> > >  		struct drm_i915_gem_object *obj = vma->obj;
> > >  		unsigned int was_bound =
> > >  			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
> > >  
> > >  		GEM_BUG_ON(!was_bound);
> > > -		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
> > > +		vma->ops->bind_vma(vm, NULL, vma,
> > >  				   obj ? obj->cache_level : 0,
> > >  				   was_bound);
> > 
> > Can we even get here with DPT? Ie. shouldn't everything have been 
> > thrown out during suspend?
> 
> After calling vma->ops->unbind_vma() the DPT object still remains on the
> vm->bound_list, it's only supposed to clear the PTE entries (atm we're
> not doing that for DPT). Here we re-instate the PTEs for the same
> objects still on the bound_list.

Ah, I thought we just evict everything not pinned. But I guess not.
diff mbox series

Patch

diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index f17383e76eb71..834dc1b6a0729 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -116,17 +116,26 @@  static bool needs_idle_maps(struct drm_i915_private *i915)
 	return false;
 }
 
-void i915_ggtt_suspend(struct i915_ggtt *ggtt)
+/**
+ * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
+ * @vm: The VM to suspend the mappings for
+ *
+ * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
+ * DPT page table.
+ */
+void i915_ggtt_suspend_vm(struct i915_address_space *vm)
 {
 	struct i915_vma *vma, *vn;
 	int open;
 
-	mutex_lock(&ggtt->vm.mutex);
+	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+
+	mutex_lock(&vm->mutex);
 
 	/* Skip rewriting PTE on VMA unbind. */
-	open = atomic_xchg(&ggtt->vm.open, 0);
+	open = atomic_xchg(&vm->open, 0);
 
-	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
 		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 		i915_vma_wait_for_bind(vma);
 
@@ -139,11 +148,17 @@  void i915_ggtt_suspend(struct i915_ggtt *ggtt)
 		}
 	}
 
-	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+	vm->clear_range(vm, 0, vm->total);
+
+	atomic_set(&vm->open, open);
+
+	mutex_unlock(&vm->mutex);
+}
+
+void i915_ggtt_suspend(struct i915_ggtt *ggtt)
+{
+	i915_ggtt_suspend_vm(&ggtt->vm);
 	ggtt->invalidate(ggtt);
-	atomic_set(&ggtt->vm.open, open);
-
-	mutex_unlock(&ggtt->vm.mutex);
 
 	intel_gt_check_and_clear_faults(ggtt->vm.gt);
 }
@@ -1253,37 +1268,59 @@  void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
 	ggtt->invalidate(ggtt);
 }
 
-void i915_ggtt_resume(struct i915_ggtt *ggtt)
+/**
+ * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
+ * @vm: The VM to restore the mappings for
+ *
+ * Restore the memory mappings for all objects mapped to HW via the GGTT or a
+ * DPT page table.
+ *
+ * Returns %true if restoring the mapping for any object that was in a write
+ * domain before suspend.
+ */
+bool i915_ggtt_resume_vm(struct i915_address_space *vm)
 {
 	struct i915_vma *vma;
-	bool flush = false;
+	bool write_domain_objs = false;
 	int open;
 
-	intel_gt_check_and_clear_faults(ggtt->vm.gt);
+	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
 
 	/* First fill our portion of the GTT with scratch pages */
-	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+	vm->clear_range(vm, 0, vm->total);
 
 	/* Skip rewriting PTE on VMA unbind. */
-	open = atomic_xchg(&ggtt->vm.open, 0);
+	open = atomic_xchg(&vm->open, 0);
 
 	/* clflush objects bound into the GGTT and rebind them. */
-	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
+	list_for_each_entry(vma, &vm->bound_list, vm_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 		unsigned int was_bound =
 			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
 
 		GEM_BUG_ON(!was_bound);
-		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
+		vma->ops->bind_vma(vm, NULL, vma,
 				   obj ? obj->cache_level : 0,
 				   was_bound);
 		if (obj) { /* only used during resume => exclusive access */
-			flush |= fetch_and_zero(&obj->write_domain);
+			write_domain_objs |= fetch_and_zero(&obj->write_domain);
 			obj->read_domains |= I915_GEM_DOMAIN_GTT;
 		}
 	}
 
-	atomic_set(&ggtt->vm.open, open);
+	atomic_set(&vm->open, open);
+
+	return write_domain_objs;
+}
+
+void i915_ggtt_resume(struct i915_ggtt *ggtt)
+{
+	bool flush;
+
+	intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+	flush = i915_ggtt_resume_vm(&ggtt->vm);
+
 	ggtt->invalidate(ggtt);
 
 	if (flush)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index bc67502633599..dfeaef680aacd 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -544,6 +544,8 @@  int i915_ppgtt_init_hw(struct intel_gt *gt);
 struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
 				     unsigned long lmem_pt_obj_flags);
 
+void i915_ggtt_suspend_vm(struct i915_address_space *vm);
+bool i915_ggtt_resume_vm(struct i915_address_space *vm);
 void i915_ggtt_suspend(struct i915_ggtt *gtt);
 void i915_ggtt_resume(struct i915_ggtt *ggtt);