mm/hmm: fix hmm_range_dma_map()/hmm_range_dma_unmap()

Message ID: 20190409175340.26614-1-jglisse@redhat.com
State: New, archived
Series: mm/hmm: fix hmm_range_dma_map()/hmm_range_dma_unmap()

Commit Message

Jerome Glisse April 9, 2019, 5:53 p.m. UTC
From: Jérôme Glisse <jglisse@redhat.com>

The code was using the wrong field and the wrong enum for read-only
versus read-and-write mappings: range->values where range->flags was
intended, and DMA_FROM_DEVICE where DMA_TO_DEVICE was intended.
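
For reference, a minimal sketch of the mix-up (field names follow the
hmm_range API as it stood at the time; this is an illustration, not
part of the patch):

	/*
	 * In this era of the API, struct hmm_range carries two lookup
	 * tables: flags[] is indexed by enum hmm_pfn_flag_e (which is
	 * where HMM_PFN_WRITE lives), while values[] is indexed by
	 * enum hmm_pfn_value_e. Indexing values[] with HMM_PFN_WRITE
	 * therefore reads the wrong array with an enum from the wrong
	 * namespace, so the write test never did what it claimed.
	 */
	enum dma_data_direction dir = DMA_TO_DEVICE;

	/* Wrong: values[] indexed by a flag enum. */
	if (range->pfns[i] & range->values[HMM_PFN_WRITE])
		dir = DMA_BIDIRECTIONAL;

	/* Right: flags[] holds the per-flag bit masks. */
	if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
		dir = DMA_BIDIRECTIONAL;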

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
---
 mm/hmm.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

Comments

Andrew Morton April 9, 2019, 9:52 p.m. UTC | #1
On Tue,  9 Apr 2019 13:53:40 -0400 jglisse@redhat.com wrote:

> The code was using the wrong field and the wrong enum for read-only
> versus read-and-write mappings: range->values where range->flags was
> intended, and DMA_FROM_DEVICE where DMA_TO_DEVICE was intended.

For those who were wondering, this fixes
mm-hmm-add-an-helper-function-that-fault-pages-and-map-them-to-a-device-v3.patch,
which is presently queued in -mm.
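
As for the direction change itself: in the DMA API, the direction names
the data flow relative to the device. A page the device will only read
maps as DMA_TO_DEVICE (data moves from memory to the device), while
DMA_FROM_DEVICE describes a buffer the device writes into, which is
wrong for a read-only mapping; a page the device may both read and
write needs DMA_BIDIRECTIONAL. A sketch of the resulting mapping logic
(the hmm_dma_dir() helper is hypothetical, invented here for
illustration):

	#include <linux/dma-mapping.h>

	/*
	 * Hypothetical helper, not in the patch: pick the DMA direction
	 * for one HMM entry. Read-only pages are only ever a source for
	 * the device; writable pages may move data either way.
	 */
	static enum dma_data_direction hmm_dma_dir(const struct hmm_range *range,
						   unsigned long i)
	{
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			return DMA_BIDIRECTIONAL;
		return DMA_TO_DEVICE;
	}

	...
	daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE,
				 hmm_dma_dir(range, i));

The same direction must be passed to dma_unmap_page() at teardown,
which is why the unmap paths in the patch repeat the write test.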

Patch

diff --git a/mm/hmm.c b/mm/hmm.c
index 90369fd2307b..ecd16718285e 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -1203,7 +1203,7 @@  long hmm_range_dma_map(struct hmm_range *range,
 
 	npages = (range->end - range->start) >> PAGE_SHIFT;
 	for (i = 0, mapped = 0; i < npages; ++i) {
-		enum dma_data_direction dir = DMA_FROM_DEVICE;
+		enum dma_data_direction dir = DMA_TO_DEVICE;
 		struct page *page;
 
 		/*
@@ -1227,7 +1227,7 @@  long hmm_range_dma_map(struct hmm_range *range,
 		}
 
 		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->values[HMM_PFN_WRITE])
+		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
 			dir = DMA_BIDIRECTIONAL;
 
 		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
@@ -1243,7 +1243,7 @@  long hmm_range_dma_map(struct hmm_range *range,
 
 unmap:
 	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
-		enum dma_data_direction dir = DMA_FROM_DEVICE;
+		enum dma_data_direction dir = DMA_TO_DEVICE;
 		struct page *page;
 
 		page = hmm_device_entry_to_page(range, range->pfns[i]);
@@ -1254,7 +1254,7 @@  long hmm_range_dma_map(struct hmm_range *range,
 			continue;
 
 		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->values[HMM_PFN_WRITE])
+		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
 			dir = DMA_BIDIRECTIONAL;
 
 		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
@@ -1298,7 +1298,7 @@  long hmm_range_dma_unmap(struct hmm_range *range,
 
 	npages = (range->end - range->start) >> PAGE_SHIFT;
 	for (i = 0; i < npages; ++i) {
-		enum dma_data_direction dir = DMA_FROM_DEVICE;
+		enum dma_data_direction dir = DMA_TO_DEVICE;
 		struct page *page;
 
 		page = hmm_device_entry_to_page(range, range->pfns[i]);
@@ -1306,7 +1306,7 @@  long hmm_range_dma_unmap(struct hmm_range *range,
 			continue;
 
 		/* If it is read and write than map bi-directional. */
-		if (range->pfns[i] & range->values[HMM_PFN_WRITE]) {
+		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
 			dir = DMA_BIDIRECTIONAL;
 
 			/*