@@ -979,8 +979,9 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
struct bio *clone;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
- unsigned i, len;
+ unsigned i, len, remaining_size;
struct page *page;
+ struct bio_vec *bvec;
retry:
if (unlikely(gfp_mask & __GFP_WAIT))
@@ -992,6 +993,8 @@ retry:
clone_init(io, clone);
+ remaining_size = size;
+
for (i = 0; i < nr_iovecs; i++) {
page = mempool_alloc(cc->page_pool, gfp_mask);
if (!page) {
@@ -1001,18 +1004,16 @@ retry:
goto retry;
}
- len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+ len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
- if (!bio_add_page(clone, page, len, 0)) {
- DMERR("bio_add_page failed for page %d: the underlying device has stricter limits than dm-crypt target", i);
- mempool_free(page, cc->page_pool);
- crypt_free_buffer_pages(cc, clone);
- bio_put(clone);
- clone = NULL;
- goto return_clone;
- }
+ bvec = &clone->bi_io_vec[clone->bi_vcnt++];
+ bvec->bv_page = page;
+ bvec->bv_len = len;
+ bvec->bv_offset = 0;
+
+ clone->bi_iter.bi_size += len;
- size -= len;
+ remaining_size -= len;
}
return_clone:
Also note: I folded the following into patch 6. The BUG_ON conveys the
intent that mempool_alloc should _never_ fail once __GFP_WAIT is set in
gfp_mask — if it did fail, we'd deadlock trying to re-acquire the mutex
rather than ever hitting the BUG_ON, but the assertion still documents
the invariant:
@@ -967,10 +967,10 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
* the mempool concurrently, it may deadlock in a situation where both processes
* have allocated 128 pages and the mempool is exhausted.
*
- * In order to avoid this scenarios, we allocate the pages under a mutex.
+ * In order to avoid this scenario we allocate the pages under a mutex.
*
* In order to not degrade performance with excessive locking, we try
- * non-blocking allocations without a mutex first and if it fails, we fallback
+ * non-blocking allocation without a mutex first and if it fails, we fallback
* to a blocking allocation with a mutex.
*/
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
@@ -998,6 +998,7 @@ retry:
for (i = 0; i < nr_iovecs; i++) {
page = mempool_alloc(cc->page_pool, gfp_mask);
if (!page) {
+ BUG_ON(gfp_mask & __GFP_WAIT);
crypt_free_buffer_pages(cc, clone);
bio_put(clone);
gfp_mask |= __GFP_WAIT;