diff mbox

[19/26] drm/i915: Consolidate dma mappings

Message ID 1395121738-29126-20-git-send-email-benjamin.widawsky@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Ben Widawsky March 18, 2014, 5:48 a.m. UTC
With a little bit of macro magic, and the fact that every page
table/dir/etc. we wish to map will have a page and a daddr member, we can
greatly simplify and reduce the code.

The patch introduces an i915_dma_map/unmap which has the same semantics
as pci_map_page, but is one line and doesn't require newlines or local
variables to make it fit cleanly.

Notice that even the page allocation shares this same attribute. For
now, I am leaving that code untouched because the macro version would be
a bit on the big side - but it's a nice cleanup as well (IMO).

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 56 ++++++++++++-------------------------
 1 file changed, 18 insertions(+), 38 deletions(-)

Comments

Chris Wilson March 18, 2014, 9:28 a.m. UTC | #1
On Mon, Mar 17, 2014 at 10:48:51PM -0700, Ben Widawsky wrote:
> With a little bit of macro magic, and the fact that every page
> table/dir/etc. we wish to map will have a page, and daddr member, we can
> greatly simplify and reduce code.
> 
> The patch introduces an i915_dma_map/unmap which has the same semantics
> as pci_map_page, but is 1 line, and doesn't require newlines, or local
> variables to make it fit cleanly.
> 
> Notice that even the page allocation shares this same attribute. For
> now, I am leaving that code untouched because the macro version would be
> a bit on the big side - but it's a nice cleanup as well (IMO)

Doesn't this make the error unwinding very fragile and likely to unmap a
pci_dma_mapping_error() cookie rather than the dma_addr_t?
-Chris
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9630109..abef33dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -183,45 +183,33 @@  static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
-#define dma_unmap_pt_single(pt, dev) do { \
-	pci_unmap_page((dev)->pdev, (pt)->daddr, 4096, PCI_DMA_BIDIRECTIONAL); \
+#define i915_dma_unmap_single(px, dev) do { \
+	pci_unmap_page((dev)->pdev, (px)->daddr, 4096, PCI_DMA_BIDIRECTIONAL); \
 } while (0);
 
 /**
- * dma_map_pt_single() - Create a dma mapping for a page table
- * @pt:		Page table to get a DMA map for
+ * i915_dma_map_px_single() - Create a dma mapping for a page table/dir/etc.
+ * @px:		Page table/dir/etc to get a DMA map for
  * @dev:	drm device
  *
  * Page table allocations are unified across all gens. They always require a
- * single 4k allocation, as well as a DMA mapping.
+ * single 4k allocation, as well as a DMA mapping. If we keep the structs
+ * symmetric here, the simple macro covers us for every page table type.
  *
  * Return: 0 if success.
  */
-static int dma_map_pt_single(struct i915_pagetab *pt, struct drm_device *dev)
-{
-	struct page *page;
-	dma_addr_t pt_addr;
-	int ret;
-
-	page = pt->page;
-	pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
-			       PCI_DMA_BIDIRECTIONAL);
-
-	ret = pci_dma_mapping_error(dev->pdev, pt_addr);
-	if (ret)
-		return ret;
-
-	pt->daddr = pt_addr;
-
-	return 0;
-}
+#define i915_dma_map_px_single(px, dev) \
+	pci_dma_mapping_error((dev)->pdev, \
+			      (px)->daddr = pci_map_page((dev)->pdev, \
+							 (px)->page, 0, 4096, \
+							 PCI_DMA_BIDIRECTIONAL))
 
 static void free_pt_single(struct i915_pagetab *pt, struct drm_device *dev)
 {
 	if (WARN_ON(!pt->page))
 		return;
 
-	dma_unmap_pt_single(pt, dev);
+	i915_dma_unmap_single(pt, dev);
 	__free_page(pt->page);
 	kfree(pt);
 }
@@ -241,7 +229,7 @@  static struct i915_pagetab *alloc_pt_single(struct drm_device *dev)
 		return ERR_PTR(-ENOMEM);
 	}
 
-	ret = dma_map_pt_single(pt, dev);
+	ret = i915_dma_map_px_single(pt, dev);
 	if (ret) {
 		__free_page(pt->page);
 		kfree(pt);
@@ -484,7 +472,7 @@  static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 
 static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 {
-	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
+	struct drm_device *dev = ppgtt->base.dev;
 	int i, j;
 
 	for (i = 0; i < ppgtt->num_pd_pages; i++) {
@@ -493,16 +481,14 @@  static void gen8_ppgtt_dma_unmap_pages(struct i915_hw_ppgtt *ppgtt)
 		if (!ppgtt->pdp.pagedir[i]->daddr)
 			continue;
 
-		pci_unmap_page(hwdev, ppgtt->pdp.pagedir[i]->daddr, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
+		i915_dma_unmap_single(ppgtt->pdp.pagedir[i], dev);
 
 		for (j = 0; j < I915_PDES_PER_PD; j++) {
 			struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
 			struct i915_pagetab *pt =  pd->page_tables[j];
 			dma_addr_t addr = pt->daddr;
 			if (addr)
-				pci_unmap_page(hwdev, addr, PAGE_SIZE,
-					       PCI_DMA_BIDIRECTIONAL);
+				i915_dma_unmap_single(pt, dev);
 		}
 	}
 }
@@ -588,19 +574,13 @@  err_out:
 static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
 					     const int pdpe)
 {
-	dma_addr_t pd_addr;
 	int ret;
 
-	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
-			       ppgtt->pdp.pagedir[pdpe]->page, 0,
-			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-
-	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
+	ret = i915_dma_map_px_single(ppgtt->pdp.pagedir[pdpe],
+				     ppgtt->base.dev);
 	if (ret)
 		return ret;
 
-	ppgtt->pdp.pagedir[pdpe]->daddr = pd_addr;
-
 	return 0;
 }