@@ -38,6 +38,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
unsigned int len;
+ gfp_t gfp = GFP_ATOMIC;
len = seqhilen;
@@ -54,7 +55,11 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
len += sizeof(struct scatterlist) * nfrags;
- return kmalloc(len, GFP_ATOMIC);
+ if (crypto_aead_reqsize(aead) &&
+ (crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_DMA))
+ gfp |= GFP_DMA;
+
+ return kmalloc(len, gfp);
}
static inline __be32 *esp_tmp_seqhi(void *tmp)
@@ -65,6 +65,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
unsigned int len;
+ gfp_t gfp = GFP_ATOMIC;
len = seqihlen;
@@ -81,7 +82,11 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
len += sizeof(struct scatterlist) * nfrags;
- return kmalloc(len, GFP_ATOMIC);
+ if (crypto_aead_reqsize(aead) &&
+ (crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_DMA))
+ gfp |= GFP_DMA;
+
+ return kmalloc(len, gfp);
}
static inline __be32 *esp_tmp_seqhi(void *tmp)
Some crypto backends might require the requests' private contexts to be
allocated in DMA-able memory.

Signed-off-by: Horia Geanta <horia.geanta@freescale.com>
---
Depends on patch 1/4 (sent only on crypto list) that adds the
CRYPTO_TFM_REQ_DMA flag.

 net/ipv4/esp4.c | 7 ++++++-
 net/ipv6/esp6.c | 7 ++++++-
 2 files changed, 12 insertions(+), 2 deletions(-)