[01/14] scatterlist: add sg_kmap_atomic / sg_kunmap_atomic helpers
diff mbox series

Message ID 20190212072528.13167-2-hch@lst.de
State New
Headers show
Series
  • [01/14] scatterlist: add sg_kmap_atomic / sg_kunmap_atomic helpers
Related show

Commit Message

Christoph Hellwig Feb. 12, 2019, 7:25 a.m. UTC
This avoids bug-prone open coding of the sg offset handling and
also helps to document the limitations of mapping scatterlist
entries.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/scatterlist.h | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

Patch
diff mbox series

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b96f0d0b5b8f..524cd8448a48 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -2,6 +2,7 @@ 
 #ifndef _LINUX_SCATTERLIST_H
 #define _LINUX_SCATTERLIST_H
 
+#include <linux/highmem.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/bug.h>
@@ -239,6 +240,31 @@  static inline void *sg_virt(struct scatterlist *sg)
 	return page_address(sg_page(sg)) + sg->offset;
 }
 
+/**
+ * sg_kmap_atomic - map a S/G list entry to a kernel address
+ * @sg:		scatterlist entry
+ *
+ * Return a kernel address for a scatterlist entry by kmapping it.  The entry
+ * must not span multiple pages, and because the mapping is atomic the caller
+ * must not sleep before releasing it with sg_kunmap_atomic().
+ */
+static inline void *sg_kmap_atomic(struct scatterlist *sg)
+{
+	if (WARN_ON_ONCE(sg->offset + sg->length > PAGE_SIZE))
+		return NULL;
+	return kmap_atomic(sg_page(sg)) + sg->offset;
+}
+
+/**
+ * sg_kunmap_atomic - unmap a S/G list entry mapped with sg_kmap_atomic
+ * @sg:		scatterlist entry
+ * @ptr:	address returned from sg_kmap_atomic
+ */
+static inline void sg_kunmap_atomic(struct scatterlist *sg, void *ptr)
+{
+	kunmap_atomic(ptr - sg->offset);
+}
+
 /**
  * sg_init_marker - Initialize markers in sg table
  * @sgl:	   The SG table