@@ -203,12 +203,12 @@ static int skcipher_next_fast(struct skcipher_walk *walk)
{
unsigned long diff;
diff = offset_in_page(walk->in.offset) -
offset_in_page(walk->out.offset);
- diff |= (u8 *)scatterwalk_page(&walk->in) -
- (u8 *)scatterwalk_page(&walk->out);
+ diff |= (u8 *)(sg_page(walk->in.sg) + (walk->in.offset >> PAGE_SHIFT)) -
+ (u8 *)(sg_page(walk->out.sg) + (walk->out.offset >> PAGE_SHIFT));
skcipher_map_src(walk);
walk->dst.virt.addr = walk->src.virt.addr;
if (diff) {
@@ -47,39 +47,58 @@ static inline void scatterwalk_start_at_pos(struct scatter_walk *walk,
}
walk->sg = sg;
walk->offset = sg->offset + pos;
}
-static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
-{
- unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
- unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
- return len_this_page > len ? len : len_this_page;
-}
-
static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
unsigned int nbytes)
{
+ unsigned int len_this_sg;
+ unsigned int limit;
+
if (walk->offset >= walk->sg->offset + walk->sg->length)
scatterwalk_start(walk, sg_next(walk->sg));
- return min(nbytes, scatterwalk_pagelen(walk));
-}
-
-static inline struct page *scatterwalk_page(struct scatter_walk *walk)
-{
- return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
-}
+ len_this_sg = walk->sg->offset + walk->sg->length - walk->offset;
+
+ /*
+ * HIGHMEM case: the page may have to be mapped into memory. To avoid
+ * the complexity of having to map multiple pages at once per sg entry,
+ * clamp the returned length to not cross a page boundary.
+ *
+ * !HIGHMEM case: no mapping is needed; all pages of the sg entry are
+ * already mapped contiguously in the kernel's direct map. For improved
+ * performance, allow the walker to return data segments that cross a
+ * page boundary. Do still cap the length to PAGE_SIZE, since some
+ * users rely on that to avoid disabling preemption for too long when
+ * using SIMD. It's also needed for when skcipher_walk uses a bounce
+ * page due to the data not being aligned to the algorithm's alignmask.
+ */
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ limit = PAGE_SIZE - offset_in_page(walk->offset);
+ else
+ limit = PAGE_SIZE;
-static inline void scatterwalk_unmap(void *vaddr)
-{
- kunmap_local(vaddr);
+ return min3(nbytes, len_this_sg, limit);
}
static inline void *scatterwalk_map(struct scatter_walk *walk)
{
- return kmap_local_page(scatterwalk_page(walk)) +
- offset_in_page(walk->offset);
+ struct page *base_page = sg_page(walk->sg);
+
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ return kmap_local_page(base_page + (walk->offset >> PAGE_SHIFT)) +
+ offset_in_page(walk->offset);
+ /*
+ * When !HIGHMEM we allow the walker to return segments that span a page
+ * boundary; see scatterwalk_clamp(). To make it clear that in this
+ * case we're working in the linear buffer of the whole sg entry in the
+ * kernel's direct map rather than within the mapped buffer of a single
+ * page, compute the address as an offset from the page_address() of the
+ * first page of the sg entry. Either way the result is the address in
+ * the direct map, but this makes it clearer what is really going on.
+ */
+ return page_address(base_page) + walk->offset;
}
/**
* scatterwalk_next() - Get the next data buffer in a scatterlist walk
* @walk: the scatter_walk
@@ -96,10 +115,16 @@ static inline void *scatterwalk_next(struct scatter_walk *walk,
{
*nbytes_ret = scatterwalk_clamp(walk, total);
return scatterwalk_map(walk);
}
+static inline void scatterwalk_unmap(const void *vaddr)
+{
+ if (IS_ENABLED(CONFIG_HIGHMEM))
+ kunmap_local(vaddr);
+}
+
/* Consume @nbytes bytes: move the walk position forward within the sg list. */
static inline void scatterwalk_advance(struct scatter_walk *walk,
				       unsigned int nbytes)
{
	walk->offset = walk->offset + nbytes;
}
@@ -114,11 +139,11 @@ static inline void scatterwalk_advance(struct scatter_walk *walk,
* Use this if the @vaddr was not written to, i.e. it is source data.
*/
static inline void scatterwalk_done_src(struct scatter_walk *walk,
					const void *vaddr, unsigned int nbytes)
{
	/*
	 * Source data was only read, so no dcache maintenance is needed —
	 * just move past it and drop the mapping.  The two operations are
	 * independent: advancing touches only walk->offset.
	 */
	scatterwalk_advance(walk, nbytes);
	scatterwalk_unmap(vaddr);
}
/**
* scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
@@ -131,12 +156,20 @@ static inline void scatterwalk_done_src(struct scatter_walk *walk,
*/
static inline void scatterwalk_done_dst(struct scatter_walk *walk,
void *vaddr, unsigned int nbytes)
{
scatterwalk_unmap(vaddr);
- if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
- flush_dcache_page(scatterwalk_page(walk));
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) {
+ struct page *base_page, *start_page, *end_page, *page;
+
+ base_page = sg_page(walk->sg);
+ start_page = base_page + (walk->offset >> PAGE_SHIFT);
+ end_page = base_page + ((walk->offset + nbytes +
+ PAGE_SIZE - 1) >> PAGE_SHIFT);
+ for (page = start_page; page < end_page; page++)
+ flush_dcache_page(page);
+ }
scatterwalk_advance(walk, nbytes);
}
void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes);