
[RFC,04/10] drm/i915: make more uncore functions work on intel_uncore

Message ID 20190313231319.711-5-daniele.ceraolospurio@intel.com (mailing list archive)
State New, archived
Series: Compartmentalize uncore code

Commit Message

Daniele Ceraolo Spurio March 13, 2019, 11:13 p.m. UTC
Move the init, fini, prune, suspend and resume functions to work on
intel_uncore instead of dev_priv

Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c               |  20 +-
 drivers/gpu/drm/i915/intel_uncore.c           | 290 +++++++++---------
 drivers/gpu/drm/i915/intel_uncore.h           |  12 +-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |   2 +-
 drivers/gpu/drm/i915/selftests/mock_uncore.c  |   6 +-
 drivers/gpu/drm/i915/selftests/mock_uncore.h  |   2 +-
 6 files changed, 168 insertions(+), 164 deletions(-)
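
The conversion pattern is the same throughout: callers now pass &dev_priv->uncore,
and the uncore functions reach back to the owning device through uncore_to_i915()
only where device-level checks (INTEL_GEN(), IS_CHERRYVIEW(), ...) are still needed.
Below is a minimal, self-contained sketch of that embedded-struct pattern; it assumes
uncore_to_i915() behaves like a container_of() back-reference (the helper is defined
elsewhere in the series, not in this diff), and the fake_* names are illustrative
only, not the i915 definitions.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_uncore {
	unsigned int fw_domains;
};

struct fake_i915 {
	int gen;
	struct fake_uncore uncore;	/* embedded, like dev_priv->uncore */
};

static struct fake_i915 *fake_uncore_to_i915(struct fake_uncore *uncore)
{
	/* Recover the owning device from the embedded member. */
	return container_of(uncore, struct fake_i915, uncore);
}

/* Operates on the uncore; reaches back to the device only when needed. */
static void fake_uncore_init(struct fake_uncore *uncore)
{
	struct fake_i915 *i915 = fake_uncore_to_i915(uncore);

	uncore->fw_domains = (i915->gen >= 6) ? 0x1 : 0;
}

int main(void)
{
	struct fake_i915 i915 = { .gen = 9 };

	/* Callers pass &i915.uncore instead of the whole device. */
	fake_uncore_init(&i915.uncore);
	printf("fw_domains = %#x\n", i915.uncore.fw_domains);
	return 0;
}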

Comments

Zanoni, Paulo R March 15, 2019, 10:17 p.m. UTC | #1
On Wed, 2019-03-13 at 16:13 -0700, Daniele Ceraolo Spurio wrote:
> Move the init, fini, prune, suspend and resume functions to work on
> intel_uncore instead of dev_priv
> 

A common theme in this series is that the last sentence of the commit message
is missing its final period ("."). Please fix all of them :).

I think the s/dev_priv/i915/ here is a little unnecessary since it
inflates the diff even more (when will we settle on a single name?),
but okay, let's go with it.

The patch is still worth it on its own IMHO due to all the dev_priv->uncore
to plain uncore reductions.

Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>


Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4ace6fadfbc2..a2e039f523c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -993,11 +993,11 @@  static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	if (ret < 0)
 		goto err_bridge;
 
-	intel_uncore_init(dev_priv);
+	intel_uncore_init(&dev_priv->uncore);
 
 	intel_device_info_init_mmio(dev_priv);
 
-	intel_uncore_prune(dev_priv);
+	intel_uncore_prune(&dev_priv->uncore);
 
 	intel_uc_init_mmio(dev_priv);
 
@@ -1010,7 +1010,7 @@  static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 	return 0;
 
 err_uncore:
-	intel_uncore_fini(dev_priv);
+	intel_uncore_fini(&dev_priv->uncore);
 	i915_mmio_cleanup(dev_priv);
 err_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
@@ -1024,7 +1024,7 @@  static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 {
-	intel_uncore_fini(dev_priv);
+	intel_uncore_fini(&dev_priv->uncore);
 	i915_mmio_cleanup(dev_priv);
 	pci_dev_put(dev_priv->bridge_dev);
 }
@@ -2086,7 +2086,7 @@  static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
 	i915_gem_suspend_late(dev_priv);
 
-	intel_uncore_suspend(dev_priv);
+	intel_uncore_suspend(&dev_priv->uncore);
 
 	intel_power_domains_suspend(dev_priv,
 				    get_suspend_mode(dev_priv, hibernation));
@@ -2282,7 +2282,9 @@  static int i915_drm_resume_early(struct drm_device *dev)
 		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
 			  ret);
 
-	intel_uncore_resume_early(dev_priv);
+	intel_uncore_resume_early(&dev_priv->uncore);
+
+	i915_check_and_clear_faults(dev_priv);
 
 	if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
 		gen9_sanitize_dc_state(dev_priv);
@@ -2852,7 +2854,7 @@  static int intel_runtime_suspend(struct device *kdev)
 
 	intel_runtime_pm_disable_interrupts(dev_priv);
 
-	intel_uncore_suspend(dev_priv);
+	intel_uncore_suspend(&dev_priv->uncore);
 
 	ret = 0;
 	if (INTEL_GEN(dev_priv) >= 11) {
@@ -2869,7 +2871,7 @@  static int intel_runtime_suspend(struct device *kdev)
 
 	if (ret) {
 		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
-		intel_uncore_runtime_resume(dev_priv);
+		intel_uncore_runtime_resume(&dev_priv->uncore);
 
 		intel_runtime_pm_enable_interrupts(dev_priv);
 
@@ -2966,7 +2968,7 @@  static int intel_runtime_resume(struct device *kdev)
 		ret = vlv_resume_prepare(dev_priv, true);
 	}
 
-	intel_uncore_runtime_resume(dev_priv);
+	intel_uncore_runtime_resume(&dev_priv->uncore);
 
 	intel_runtime_pm_enable_interrupts(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 75279c627388..dd81c2655e2d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -523,62 +523,58 @@  check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
-static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 					  unsigned int restore_forcewake)
 {
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
 	/* clear out unclaimed reg detection bit */
-	if (check_for_unclaimed_mmio(dev_priv))
+	if (check_for_unclaimed_mmio(i915))
 		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
 	/* WaDisableShadowRegForCpd:chv */
-	if (IS_CHERRYVIEW(dev_priv)) {
-		__raw_i915_write32(dev_priv, GTFIFOCTL,
-				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
+	if (IS_CHERRYVIEW(i915)) {
+		__raw_i915_write32(i915, GTFIFOCTL,
+				   __raw_i915_read32(i915, GTFIFOCTL) |
 				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
 				   GT_FIFO_CTL_RC6_POLICY_STALL);
 	}
 
 	iosf_mbi_punit_acquire();
-	intel_uncore_forcewake_reset(&dev_priv->uncore);
+	intel_uncore_forcewake_reset(uncore);
 	if (restore_forcewake) {
-		spin_lock_irq(&dev_priv->uncore.lock);
-		dev_priv->uncore.funcs.force_wake_get(&dev_priv->uncore,
-						      restore_forcewake);
-
-		if (IS_GEN_RANGE(dev_priv, 6, 7))
-			dev_priv->uncore.fifo_count =
-				fifo_free_entries(dev_priv);
-		spin_unlock_irq(&dev_priv->uncore.lock);
+		spin_lock_irq(&uncore->lock);
+		uncore->funcs.force_wake_get(uncore, restore_forcewake);
+
+		if (IS_GEN_RANGE(i915, 6, 7))
+			uncore->fifo_count = fifo_free_entries(i915);
+		spin_unlock_irq(&uncore->lock);
 	}
 	iosf_mbi_punit_release();
 }
 
-void intel_uncore_suspend(struct drm_i915_private *dev_priv)
+void intel_uncore_suspend(struct intel_uncore *uncore)
 {
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	dev_priv->uncore.fw_domains_saved =
-		intel_uncore_forcewake_reset(&dev_priv->uncore);
+		&uncore->pmic_bus_access_nb);
+	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
 	iosf_mbi_punit_release();
 }
 
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
+void intel_uncore_resume_early(struct intel_uncore *uncore)
 {
 	unsigned int restore_forcewake;
 
-	restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
-	__intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
+	__intel_uncore_early_sanitize(uncore, restore_forcewake);
 
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	i915_check_and_clear_faults(dev_priv);
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+void intel_uncore_runtime_resume(struct intel_uncore *uncore)
 {
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
@@ -1307,33 +1303,34 @@  __gen6_write(32)
 #undef GEN6_WRITE_FOOTER
 #undef GEN6_WRITE_HEADER
 
-#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
 do { \
-	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
-	(i915)->uncore.funcs.mmio_writew = x##_write16; \
-	(i915)->uncore.funcs.mmio_writel = x##_write32; \
+	(uncore)->funcs.mmio_writeb = x##_write8; \
+	(uncore)->funcs.mmio_writew = x##_write16; \
+	(uncore)->funcs.mmio_writel = x##_write32; \
 } while (0)
 
-#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
 do { \
-	(i915)->uncore.funcs.mmio_readb = x##_read8; \
-	(i915)->uncore.funcs.mmio_readw = x##_read16; \
-	(i915)->uncore.funcs.mmio_readl = x##_read32; \
-	(i915)->uncore.funcs.mmio_readq = x##_read64; \
+	(uncore)->funcs.mmio_readb = x##_read8; \
+	(uncore)->funcs.mmio_readw = x##_read16; \
+	(uncore)->funcs.mmio_readl = x##_read32; \
+	(uncore)->funcs.mmio_readq = x##_read64; \
 } while (0)
 
 
-static void fw_domain_init(struct drm_i915_private *dev_priv,
+static void fw_domain_init(struct intel_uncore *uncore,
 			   enum forcewake_domain_id domain_id,
 			   i915_reg_t reg_set,
 			   i915_reg_t reg_ack)
 {
 	struct intel_uncore_forcewake_domain *d;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 
 	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
 		return;
 
-	d = &dev_priv->uncore.fw_domain[domain_id];
+	d = &uncore->fw_domain[domain_id];
 
 	WARN_ON(d->wake_count);
 
@@ -1341,8 +1338,8 @@  static void fw_domain_init(struct drm_i915_private *dev_priv,
 	WARN_ON(!i915_mmio_reg_valid(reg_ack));
 
 	d->wake_count = 0;
-	d->reg_set = dev_priv->regs + i915_mmio_reg_offset(reg_set);
-	d->reg_ack = dev_priv->regs + i915_mmio_reg_offset(reg_ack);
+	d->reg_set = i915->regs + i915_mmio_reg_offset(reg_set);
+	d->reg_ack = i915->regs + i915_mmio_reg_offset(reg_ack);
 
 	d->id = domain_id;
 
@@ -1362,12 +1359,12 @@  static void fw_domain_init(struct drm_i915_private *dev_priv,
 	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	d->timer.function = intel_uncore_fw_release_timer;
 
-	dev_priv->uncore.fw_domains |= BIT(domain_id);
+	uncore->fw_domains |= BIT(domain_id);
 
 	fw_domain_reset(d);
 }
 
-static void fw_domain_fini(struct drm_i915_private *dev_priv,
+static void fw_domain_fini(struct intel_uncore *uncore,
 			   enum forcewake_domain_id domain_id)
 {
 	struct intel_uncore_forcewake_domain *d;
@@ -1375,85 +1372,87 @@  static void fw_domain_fini(struct drm_i915_private *dev_priv,
 	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
 		return;
 
-	d = &dev_priv->uncore.fw_domain[domain_id];
+	d = &uncore->fw_domain[domain_id];
 
 	WARN_ON(d->wake_count);
 	WARN_ON(hrtimer_cancel(&d->timer));
 	memset(d, 0, sizeof(*d));
 
-	dev_priv->uncore.fw_domains &= ~BIT(domain_id);
+	uncore->fw_domains &= ~BIT(domain_id);
 }
 
-static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
+static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 {
-	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
+	if (INTEL_GEN(i915) <= 5 || intel_vgpu_active(i915))
 		return;
 
-	if (IS_GEN(dev_priv, 6)) {
-		dev_priv->uncore.fw_reset = 0;
-		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
-		dev_priv->uncore.fw_clear = 0;
+	if (IS_GEN(i915, 6)) {
+		uncore->fw_reset = 0;
+		uncore->fw_set = FORCEWAKE_KERNEL;
+		uncore->fw_clear = 0;
 	} else {
 		/* WaRsClearFWBitsAtReset:bdw,skl */
-		dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
-		dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
-		dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
+		uncore->fw_reset = _MASKED_BIT_DISABLE(0xffff);
+		uncore->fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
+		uncore->fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
 	}
 
-	if (INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(i915) >= 11) {
 		int i;
 
-		dev_priv->uncore.funcs.force_wake_get =
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_fallback;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_RENDER_GEN9,
 			       FORCEWAKE_ACK_RENDER_GEN9);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
 			       FORCEWAKE_BLITTER_GEN9,
 			       FORCEWAKE_ACK_BLITTER_GEN9);
 		for (i = 0; i < I915_MAX_VCS; i++) {
-			if (!HAS_ENGINE(dev_priv, _VCS(i)))
+			if (!HAS_ENGINE(i915, _VCS(i)))
 				continue;
 
-			fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
+			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
 				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
 				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
 		}
 		for (i = 0; i < I915_MAX_VECS; i++) {
-			if (!HAS_ENGINE(dev_priv, _VECS(i)))
+			if (!HAS_ENGINE(i915, _VECS(i)))
 				continue;
 
-			fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
+			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
 				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
 				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
 		}
-	} else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
-		dev_priv->uncore.funcs.force_wake_get =
+	} else if (IS_GEN_RANGE(i915, 9, 10)) {
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_fallback;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_RENDER_GEN9,
 			       FORCEWAKE_ACK_RENDER_GEN9);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+		fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
 			       FORCEWAKE_BLITTER_GEN9,
 			       FORCEWAKE_ACK_BLITTER_GEN9);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
 			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
-	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
-		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+		uncore->funcs.force_wake_get = fw_domains_get;
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
 			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
-	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
-		dev_priv->uncore.funcs.force_wake_get =
+	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
-	} else if (IS_IVYBRIDGE(dev_priv)) {
+	} else if (IS_IVYBRIDGE(i915)) {
 		u32 ecobus;
 
 		/* IVB configs may use multi-threaded forcewake */
@@ -1465,9 +1464,9 @@  static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 		 * (correctly) interpreted by the test below as MT
 		 * forcewake being disabled.
 		 */
-		dev_priv->uncore.funcs.force_wake_get =
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		uncore->funcs.force_wake_put = fw_domains_put;
 
 		/* We need to init first for ECOBUS access and then
 		 * determine later if we want to reinit, in case of MT access is
@@ -1476,41 +1475,41 @@  static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
 		 * before the ecobus check.
 		 */
 
-		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
-		__raw_posting_read(dev_priv, ECOBUS);
+		__raw_i915_write32(i915, FORCEWAKE, 0);
+		__raw_posting_read(i915, ECOBUS);
 
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
 
-		spin_lock_irq(&dev_priv->uncore.lock);
-		fw_domains_get_with_thread_status(&dev_priv->uncore, FORCEWAKE_RENDER);
-		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-		fw_domains_put(&dev_priv->uncore, FORCEWAKE_RENDER);
-		spin_unlock_irq(&dev_priv->uncore.lock);
+		spin_lock_irq(&uncore->lock);
+		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
+		ecobus = __raw_i915_read32(i915, ECOBUS);
+		fw_domains_put(uncore, FORCEWAKE_RENDER);
+		spin_unlock_irq(&uncore->lock);
 
 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
 			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
 			DRM_INFO("when using vblank-synced partial screen updates.\n");
-			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 				       FORCEWAKE, FORCEWAKE_ACK);
 		}
-	} else if (IS_GEN(dev_priv, 6)) {
-		dev_priv->uncore.funcs.force_wake_get =
+	} else if (IS_GEN(i915, 6)) {
+		uncore->funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
-		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+		uncore->funcs.force_wake_put = fw_domains_put;
+		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE, FORCEWAKE_ACK);
 	}
 
 	/* All future platforms are expected to require complex power gating */
-	WARN_ON(dev_priv->uncore.fw_domains == 0);
+	WARN_ON(uncore->fw_domains == 0);
 }
 
-#define ASSIGN_FW_DOMAINS_TABLE(d) \
+#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
 { \
-	dev_priv->uncore.fw_domains_table = \
+	(uncore)->fw_domains_table = \
 			(struct intel_forcewake_range *)(d); \
-	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
 }
 
 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
@@ -1546,55 +1545,56 @@  static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-void intel_uncore_init(struct drm_i915_private *dev_priv)
+void intel_uncore_init(struct intel_uncore *uncore)
 {
-	i915_check_vgpu(dev_priv);
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
 
-	intel_uncore_edram_detect(dev_priv);
-	intel_uncore_fw_domains_init(dev_priv);
-	__intel_uncore_early_sanitize(dev_priv, 0);
+	i915_check_vgpu(i915);
 
-	dev_priv->uncore.unclaimed_mmio_check = 1;
-	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
-		i915_pmic_bus_access_notifier;
+	intel_uncore_edram_detect(i915);
+	intel_uncore_fw_domains_init(uncore);
+	__intel_uncore_early_sanitize(uncore, 0);
 
-	if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
-	} else if (IS_GEN(dev_priv, 5)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
-	} else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
+	uncore->unclaimed_mmio_check = 1;
+	uncore->pmic_bus_access_nb.notifier_call =
+		i915_pmic_bus_access_notifier;
 
-		if (IS_VALLEYVIEW(dev_priv)) {
-			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+	if (IS_GEN_RANGE(i915, 2, 4) || intel_vgpu_active(i915)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
+	} else if (IS_GEN(i915, 5)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
+	} else if (IS_GEN_RANGE(i915, 6, 7)) {
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
+
+		if (IS_VALLEYVIEW(i915)) {
+			ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 		} else {
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
 		}
-	} else if (IS_GEN(dev_priv, 8)) {
-		if (IS_CHERRYVIEW(dev_priv)) {
-			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
-			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+	} else if (IS_GEN(i915, 8)) {
+		if (IS_CHERRYVIEW(i915)) {
+			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
+			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 
 		} else {
-			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
-			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
+			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
 		}
-	} else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
-		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+	} else if (IS_GEN_RANGE(i915, 9, 10)) {
+		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
 	} else {
-		ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges);
-		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable);
-		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable);
+		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
+		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
+		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
 	}
 
-	iosf_mbi_register_pmic_bus_access_notifier(
-		&dev_priv->uncore.pmic_bus_access_nb);
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
 /*
@@ -1602,44 +1602,46 @@  void intel_uncore_init(struct drm_i915_private *dev_priv)
  * the forcewake domains. Prune them, to make sure they only reference existing
  * engines.
  */
-void intel_uncore_prune(struct drm_i915_private *dev_priv)
+void intel_uncore_prune(struct intel_uncore *uncore)
 {
-	if (INTEL_GEN(dev_priv) >= 11) {
-		enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains;
+	struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
+	if (INTEL_GEN(i915) >= 11) {
+		enum forcewake_domains fw_domains = uncore->fw_domains;
 		enum forcewake_domain_id domain_id;
 		int i;
 
 		for (i = 0; i < I915_MAX_VCS; i++) {
 			domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
 
-			if (HAS_ENGINE(dev_priv, _VCS(i)))
+			if (HAS_ENGINE(i915, _VCS(i)))
 				continue;
 
 			if (fw_domains & BIT(domain_id))
-				fw_domain_fini(dev_priv, domain_id);
+				fw_domain_fini(uncore, domain_id);
 		}
 
 		for (i = 0; i < I915_MAX_VECS; i++) {
 			domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
 
-			if (HAS_ENGINE(dev_priv, _VECS(i)))
+			if (HAS_ENGINE(i915, _VECS(i)))
 				continue;
 
 			if (fw_domains & BIT(domain_id))
-				fw_domain_fini(dev_priv, domain_id);
+				fw_domain_fini(uncore, domain_id);
 		}
 	}
 }
 
-void intel_uncore_fini(struct drm_i915_private *dev_priv)
+void intel_uncore_fini(struct intel_uncore *uncore)
 {
 	/* Paranoia: make sure we have disabled everything before we exit. */
-	intel_uncore_sanitize(dev_priv);
+	intel_uncore_sanitize(uncore_to_i915(uncore));
 
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
-		&dev_priv->uncore.pmic_bus_access_nb);
-	intel_uncore_forcewake_reset(&dev_priv->uncore);
+		&uncore->pmic_bus_access_nb);
+	intel_uncore_forcewake_reset(uncore);
 	iosf_mbi_punit_release();
 }
 
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 2293df59d8e9..4d0d7ec785f8 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -146,14 +146,14 @@  forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
 }
 
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init(struct drm_i915_private *dev_priv);
-void intel_uncore_prune(struct drm_i915_private *dev_priv);
+void intel_uncore_init(struct intel_uncore *uncore);
+void intel_uncore_prune(struct intel_uncore *uncore);
 bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
 bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-void intel_uncore_fini(struct drm_i915_private *dev_priv);
-void intel_uncore_suspend(struct drm_i915_private *dev_priv);
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
+void intel_uncore_fini(struct intel_uncore *uncore);
+void intel_uncore_suspend(struct intel_uncore *uncore);
+void intel_uncore_resume_early(struct intel_uncore *uncore);
+void intel_uncore_runtime_resume(struct intel_uncore *uncore);
 
 u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
 void assert_forcewakes_inactive(struct intel_uncore *uncore);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 54cfb611c0aa..60bbf8b4df40 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -182,7 +182,7 @@  struct drm_i915_private *mock_gem_device(void)
 		I915_GTT_PAGE_SIZE_64K |
 		I915_GTT_PAGE_SIZE_2M;
 
-	mock_uncore_init(i915);
+	mock_uncore_init(&i915->uncore);
 	i915_gem_init__mm(i915);
 
 	init_waitqueue_head(&i915->gpu_error.wait_queue);
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index 8ef14c7e5e38..c3896c1fd551 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -39,8 +39,8 @@  __nop_read(16)
 __nop_read(32)
 __nop_read(64)
 
-void mock_uncore_init(struct drm_i915_private *i915)
+void mock_uncore_init(struct intel_uncore *uncore)
 {
-	ASSIGN_WRITE_MMIO_VFUNCS(i915, nop);
-	ASSIGN_READ_MMIO_VFUNCS(i915, nop);
+	ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
+	ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
 }
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index d79aa3ca4d51..dacb36b5ffcd 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,6 @@ 
 #ifndef __MOCK_UNCORE_H
 #define __MOCK_UNCORE_H
 
-void mock_uncore_init(struct drm_i915_private *i915);
+void mock_uncore_init(struct intel_uncore *uncore);
 
 #endif /* !__MOCK_UNCORE_H */