--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -36,15 +36,4 @@ int arch_dma_supported(struct device *dev, u64 mask);
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define arch_dma_alloc_attrs arch_dma_alloc_attrs

-static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
-{
- if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
- gfp |= GFP_DMA;
-#ifdef CONFIG_X86_64
- if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
- gfp |= GFP_DMA32;
-#endif
- return gfp;
-}
-
#endif
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -487,7 +487,6 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
if (!force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24))
return dma_direct_alloc(dev, size, dma_addr, flag, attrs);

- flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
page = alloc_pages(flag | __GFP_ZERO, get_order(size));
if (!page)
return NULL;
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -446,8 +446,6 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
npages = size >> PAGE_SHIFT;
order = get_order(size);

- flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-
/* alloc enough pages (and possibly more) */
ret = (void *)__get_free_pages(flag, order);
if (!ret)
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -82,8 +82,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
if (!*dev)
*dev = &x86_dma_fallback_dev;

- *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
-
if (!is_device_dma_capable(*dev))
return false;
return true;
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -208,13 +208,6 @@ static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
void *vaddr = NULL;

order = get_order(size);
-
- /*
- * Memory will be memset to zero after marking decrypted, so don't
- * bother clearing it before.
- */
- gfp &= ~__GFP_ZERO;
-
page = alloc_pages_node(dev_to_node(dev), gfp, order);
if (page) {
dma_addr_t addr;
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2600,7 +2600,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
dma_dom = to_dma_ops_domain(domain);
size = PAGE_ALIGN(size);
dma_mask = dev->coherent_dma_mask;
- flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
flag |= __GFP_ZERO;

page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3718,7 +3718,6 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
size = PAGE_ALIGN(size);
order = get_order(size);

- flags &= ~(GFP_DMA | GFP_DMA32);

if (gfpflags_allow_blocking(flags)) {
unsigned int count = size >> PAGE_SHIFT;

All dma_ops implementations used on x86 now take care of setting their own
required GFP_ masks for the allocation. And given that the common code now
clears the harmful flags itself, we can stop clearing them in all the iommu
implementations as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/x86/include/asm/dma-mapping.h | 11 -----------
 arch/x86/kernel/amd_gart_64.c      |  1 -
 arch/x86/kernel/pci-calgary_64.c   |  2 --
 arch/x86/kernel/pci-dma.c          |  2 --
 arch/x86/mm/mem_encrypt.c          |  7 -------
 drivers/iommu/amd_iommu.c          |  1 -
 drivers/iommu/intel-iommu.c        |  1 -
 7 files changed, 25 deletions(-)
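
For reference, the clearing that the common code now performs looks roughly
like the sketch below. The helper name dma_zone_clear_flags is hypothetical
and used only for illustration; the common code does this masking inline in
the generic allocation path rather than through a named helper.

#include <linux/gfp.h>

/*
 * Sketch of the common-code behavior this patch relies on: the
 * zone-selection flags are masked off once, centrally, before any
 * dma_ops implementation sees the gfp mask.  Implementations with a
 * real zone constraint (e.g. a 24-bit coherent_dma_mask) then re-add
 * GFP_DMA or GFP_DMA32 themselves.
 */
static inline gfp_t dma_zone_clear_flags(gfp_t flag)
{
	/* let the implementation decide on the zone to allocate from */
	return flag & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
}

That centralization is what makes both the x86-only
dma_alloc_coherent_gfp_flags() helper and the per-driver
"flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32)" statements removed
above redundant.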