
[2/4] drm/vmwgfx: remove CONFIG_INTEL_IOMMU ifdefs

Message ID: 20190105080108.14837-3-hch@lst.de (mailing list archive)
State: New, archived
Series: [1/4] drm/vmwgfx: remove CONFIG_X86 ifdefs

Commit Message

Christoph Hellwig Jan. 5, 2019, 8:01 a.m. UTC
intel_iommu_enabled is defined as always false for !CONFIG_INTEL_IOMMU,
so remove the ifdefs around it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 18 ------------------
 1 file changed, 18 deletions(-)
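
For reference, the reason the ifdefs are redundant: include/linux/intel-iommu.h
already provides a fallback for the !CONFIG_INTEL_IOMMU case, roughly along
these lines (a sketch only; the exact form in the header may differ):

/*
 * Approximate sketch of the stub in include/linux/intel-iommu.h:
 * intel_iommu_enabled is always visible to callers and simply
 * evaluates to 0 when the Intel IOMMU driver is not built in.
 */
#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_enabled;
#else
#define intel_iommu_enabled	(0)
#endif

With a stub like that, the "if (intel_iommu_enabled)" branch in
vmw_dma_select_mode() evaluates to if (0) on !CONFIG_INTEL_IOMMU kernels and
is eliminated by the compiler, so the driver-side ifdefs add nothing.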

Comments

Thomas Hellstrom Jan. 8, 2019, 10:03 a.m. UTC | #1
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>

On Sat, 2019-01-05 at 09:01 +0100, Christoph Hellwig wrote:
> intel_iommu_enabled is defined as always false for !CONFIG_INTEL_IOMMU,
> so remove the ifdefs around it.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 18 ------------------
>  1 file changed, 18 deletions(-)
> 
Thomas Hellstrom Jan. 8, 2019, 10:55 a.m. UTC | #2
Hi,


On Sat, 2019-01-05 at 09:01 +0100, Christoph Hellwig wrote:
...

> intel_iommu_enabled is defined as always false for !CONFIG_INTEL_IOMMU,
> so remove the ifdefs around it.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> 
> -#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
> -	/*
> -	 * No coherent page pool
> -	 */
> -	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
> -		return -EINVAL;
> -#endif
> 

Actually this hunk is incorrect: the check it removes determines whether
the TTM subsystem maintains a coherent page pool or not. If it doesn't,
we can't use vmw_dma_alloc_coherent.

/Thomas
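
Thomas's point above is that the deleted #if mirrors the condition under
which TTM builds its coherent (DMA) page pool, so the check cannot simply be
dropped. As an illustration only (not part of this series), the check could
be kept inside vmw_dma_select_mode() without an #ifdef block along these
lines:

	/*
	 * Illustrative sketch: preserve the coherent-page-pool check
	 * using IS_ENABLED() instead of preprocessor conditionals.
	 * This assumes, as the original condition did, that TTM's
	 * coherent page pool is only available when CONFIG_SWIOTLB or
	 * CONFIG_INTEL_IOMMU is enabled.
	 */
	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
	    dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;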

Patch

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 69e325b2d954..236052ad233c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -567,12 +567,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
-#ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_enabled) {
 		dev_priv->map_mode = vmw_dma_map_populate;
 		goto out_fixup;
 	}
-#endif
 
 	if (!(vmw_force_iommu || vmw_force_coherent)) {
 		dev_priv->map_mode = vmw_dma_phys;
@@ -589,9 +587,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		dev_priv->map_mode = vmw_dma_map_populate;
 #endif
 
-#ifdef CONFIG_INTEL_IOMMU
 out_fixup:
-#endif
 	if (dev_priv->map_mode == vmw_dma_map_populate &&
 	    vmw_restrict_iommu)
 		dev_priv->map_mode = vmw_dma_map_bind;
@@ -599,13 +595,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 	if (vmw_force_coherent)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
 
-#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
-	/*
-	 * No coherent page pool
-	 */
-	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
-		return -EINVAL;
-#endif
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
 	return 0;
@@ -619,7 +608,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
  * With 32-bit we can only handle 32 bit PFNs. Optionally set that
  * restriction also for 64-bit systems.
  */
-#ifdef CONFIG_INTEL_IOMMU
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
@@ -631,12 +619,6 @@ static int vmw_dma_masks(struct vmw_private *dev_priv)
 	}
 	return 0;
 }
-#else
-static int vmw_dma_masks(struct vmw_private *dev_priv)
-{
-	return 0;
-}
-#endif
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {