diff mbox series

[RFC,05/10] crypto: engine: transform cur_req in an array

Message ID 20200114135936.32422-6-clabbe.montjoie@gmail.com (mailing list archive)
State RFC
Delegated to: Herbert Xu
Headers show
Series crypto: engine: permit to batch requests | expand

Commit Message

Corentin Labbe Jan. 14, 2020, 1:59 p.m. UTC
To have the ability to process a batch of requests in one do_one_request(), we
need to be able to store them in an array (for unpreparing them later).
This patch converts cur_req into an array of requests, but for the moment
hardcodes the maximum to 1.

Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
---
 crypto/crypto_engine.c  | 32 ++++++++++++++++++--------------
 include/crypto/engine.h | 19 +++++++++++++++----
 2 files changed, 33 insertions(+), 18 deletions(-)

Comments

Iuliana Prodan Jan. 16, 2020, 11:34 a.m. UTC | #1
On 1/14/2020 3:59 PM, Corentin Labbe wrote:
> For having the ability of doing a batch of request in one do_one_request(), we
> should be able to store them in an array. (for unpreparing them later).
> This patch converts cur_req in an array of request, but for the moment
> hardcode the maximum to 1.
> 
> Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
> ---
>   crypto/crypto_engine.c  | 32 ++++++++++++++++++--------------
>   include/crypto/engine.h | 19 +++++++++++++++----
>   2 files changed, 33 insertions(+), 18 deletions(-)
> 
> diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
> index eb029ff1e05a..b72873550587 100644
> --- a/crypto/crypto_engine.c
> +++ b/crypto/crypto_engine.c
> @@ -30,26 +30,27 @@ static void crypto_finalize_request(struct crypto_engine *engine,
>   	struct crypto_engine_ctx *enginectx;
>   
>   	spin_lock_irqsave(&engine->queue_lock, flags);
> -	if (engine->cur_req == req)
> +	if (engine->cur_reqs[0].req == req)
>   		finalize_cur_req = true;
>   	spin_unlock_irqrestore(&engine->queue_lock, flags);
>   
>   	if (finalize_cur_req) {
> -		enginectx = crypto_tfm_ctx(req->tfm);
> -		if (engine->cur_req_prepared &&
> +		enginectx = crypto_tfm_ctx(engine->cur_reqs[0].req->tfm);
> +		if (engine->cur_reqs[0].prepared &&
>   		    enginectx->op.unprepare_request) {
> -			ret = enginectx->op.unprepare_request(engine, req);
> +			ret = enginectx->op.unprepare_request(engine, engine->cur_reqs[0].req);
>   			if (ret)
>   				dev_err(engine->dev, "failed to unprepare request\n");
>   		}
> +		engine->cur_reqs[0].req->complete(engine->cur_reqs[0].req, err);
>   		spin_lock_irqsave(&engine->queue_lock, flags);
> -		engine->cur_req = NULL;
> -		engine->cur_req_prepared = false;
> +		engine->cur_reqs[0].prepared = false;
> +		engine->cur_reqs[0].req = NULL;
>   		spin_unlock_irqrestore(&engine->queue_lock, flags);
> +	} else {
> +		req->complete(req, err);
>   	}
>   
> -	req->complete(req, err);
> -
>   	kthread_queue_work(engine->kworker, &engine->pump_requests);
>   }
>   
> @@ -74,7 +75,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
>   	spin_lock_irqsave(&engine->queue_lock, flags);
>   
>   	/* Make sure we are not already running a request */
> -	if (engine->cur_req)
> +	if (engine->cur_reqs[0].req)
>   		goto out;
>   
>   	/* If another context is idling then defer */
> @@ -114,7 +115,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
>   	if (!async_req)
>   		goto out;
>   
> -	engine->cur_req = async_req;
> +	engine->cur_reqs[0].req = async_req;
>   	if (backlog)
>   		backlog->complete(backlog, -EINPROGRESS);
>   
> @@ -143,14 +144,14 @@ static void crypto_pump_requests(struct crypto_engine *engine,
>   				ret);
>   			goto req_err;
>   		}
> -		engine->cur_req_prepared = true;
> +		engine->cur_reqs[0].prepared = true;
>   	}
>   	if (!enginectx->op.do_one_request) {
>   		dev_err(engine->dev, "failed to do request\n");
>   		ret = -EINVAL;
>   		goto req_err;
>   	}
> -	ret = enginectx->op.do_one_request(engine, async_req);
> +	ret = enginectx->op.do_one_request(engine, engine->cur_reqs[0].req);
>   	if (ret) {
>   		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
>   		goto req_err;
> @@ -158,7 +159,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
>   	return;
>   
>   req_err:
> -	crypto_finalize_request(engine, async_req, ret);
> +	crypto_finalize_request(engine, engine->cur_reqs[0].req, ret);
>   	return;
>   
>   out:
> @@ -411,10 +412,13 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
>   	engine->running = false;
>   	engine->busy = false;
>   	engine->idling = false;
> -	engine->cur_req_prepared = false;
>   	engine->priv_data = dev;
>   	snprintf(engine->name, sizeof(engine->name),
>   		 "%s-engine", dev_name(dev));
> +	engine->rmax = 1;
> +	engine->cur_reqs = devm_kzalloc(dev, sizeof(struct cur_req) * engine->rmax, GFP_KERNEL);
> +	if (!engine->cur_reqs)
> +		return NULL;
>   
>   	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
>   	spin_lock_init(&engine->queue_lock);
> diff --git a/include/crypto/engine.h b/include/crypto/engine.h
> index e29cd67f93c7..362134e226f4 100644
> --- a/include/crypto/engine.h
> +++ b/include/crypto/engine.h
> @@ -18,13 +18,23 @@
>   #include <crypto/skcipher.h>
>   
>   #define ENGINE_NAME_LEN	30
> +
> +/*
> + * struct cur_req - Represent a request to be processed
> + * @prepared:	Does the request was prepared
> + * @req:	The request to be processed
> + */
> +struct cur_req {
> +	bool				prepared;
> +	struct crypto_async_request	*req;
> +};
> +
>   /*
>    * struct crypto_engine - crypto hardware engine
>    * @name: the engine name
>    * @idling: the engine is entering idle state
>    * @busy: request pump is busy
>    * @running: the engine is on working
> - * @cur_req_prepared: current request is prepared
>    * @list: link with the global crypto engine list
>    * @queue_lock: spinlock to syncronise access to request queue
>    * @queue: the crypto queue of the engine
> @@ -38,14 +48,14 @@
>    * @kworker: kthread worker struct for request pump
>    * @pump_requests: work struct for scheduling work to the request pump
>    * @priv_data: the engine private data
> - * @cur_req: the current request which is on processing
> + * @rmax:	The number of request which can be processed in one batch
> + * @cur_reqs: 	A list for requests to be processed
>    */
>   struct crypto_engine {
>   	char			name[ENGINE_NAME_LEN];
>   	bool			idling;
>   	bool			busy;
>   	bool			running;
> -	bool			cur_req_prepared;
>   
>   	struct list_head	list;
>   	spinlock_t		queue_lock;
> @@ -61,7 +71,8 @@ struct crypto_engine {
>   	struct kthread_work             pump_requests;
>   
>   	void				*priv_data;
> -	struct crypto_async_request	*cur_req;
> +	int 				rmax;
> +	struct cur_req 			*cur_reqs;
>   };

To keep requests independent, IMO it would be best to have a list of
requests, e.g. a struct requests run_queue, where:
struct requests {
	unsigned int max_no_reqs;
	unsigned int current_no_reqs;
	struct cur_req *cur_reqs; //list of the requests
} run_queue;
diff mbox series

Patch

diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index eb029ff1e05a..b72873550587 100644
--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c
@@ -30,26 +30,27 @@  static void crypto_finalize_request(struct crypto_engine *engine,
 	struct crypto_engine_ctx *enginectx;
 
 	spin_lock_irqsave(&engine->queue_lock, flags);
-	if (engine->cur_req == req)
+	if (engine->cur_reqs[0].req == req)
 		finalize_cur_req = true;
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
 
 	if (finalize_cur_req) {
-		enginectx = crypto_tfm_ctx(req->tfm);
-		if (engine->cur_req_prepared &&
+		enginectx = crypto_tfm_ctx(engine->cur_reqs[0].req->tfm);
+		if (engine->cur_reqs[0].prepared &&
 		    enginectx->op.unprepare_request) {
-			ret = enginectx->op.unprepare_request(engine, req);
+			ret = enginectx->op.unprepare_request(engine, engine->cur_reqs[0].req);
 			if (ret)
 				dev_err(engine->dev, "failed to unprepare request\n");
 		}
+		engine->cur_reqs[0].req->complete(engine->cur_reqs[0].req, err);
 		spin_lock_irqsave(&engine->queue_lock, flags);
-		engine->cur_req = NULL;
-		engine->cur_req_prepared = false;
+		engine->cur_reqs[0].prepared = false;
+		engine->cur_reqs[0].req = NULL;
 		spin_unlock_irqrestore(&engine->queue_lock, flags);
+	} else {
+		req->complete(req, err);
 	}
 
-	req->complete(req, err);
-
 	kthread_queue_work(engine->kworker, &engine->pump_requests);
 }
 
@@ -74,7 +75,7 @@  static void crypto_pump_requests(struct crypto_engine *engine,
 	spin_lock_irqsave(&engine->queue_lock, flags);
 
 	/* Make sure we are not already running a request */
-	if (engine->cur_req)
+	if (engine->cur_reqs[0].req)
 		goto out;
 
 	/* If another context is idling then defer */
@@ -114,7 +115,7 @@  static void crypto_pump_requests(struct crypto_engine *engine,
 	if (!async_req)
 		goto out;
 
-	engine->cur_req = async_req;
+	engine->cur_reqs[0].req = async_req;
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
 
@@ -143,14 +144,14 @@  static void crypto_pump_requests(struct crypto_engine *engine,
 				ret);
 			goto req_err;
 		}
-		engine->cur_req_prepared = true;
+		engine->cur_reqs[0].prepared = true;
 	}
 	if (!enginectx->op.do_one_request) {
 		dev_err(engine->dev, "failed to do request\n");
 		ret = -EINVAL;
 		goto req_err;
 	}
-	ret = enginectx->op.do_one_request(engine, async_req);
+	ret = enginectx->op.do_one_request(engine, engine->cur_reqs[0].req);
 	if (ret) {
 		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
 		goto req_err;
@@ -158,7 +159,7 @@  static void crypto_pump_requests(struct crypto_engine *engine,
 	return;
 
 req_err:
-	crypto_finalize_request(engine, async_req, ret);
+	crypto_finalize_request(engine, engine->cur_reqs[0].req, ret);
 	return;
 
 out:
@@ -411,10 +412,13 @@  struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 	engine->running = false;
 	engine->busy = false;
 	engine->idling = false;
-	engine->cur_req_prepared = false;
 	engine->priv_data = dev;
 	snprintf(engine->name, sizeof(engine->name),
 		 "%s-engine", dev_name(dev));
+	engine->rmax = 1;
+	engine->cur_reqs = devm_kzalloc(dev, sizeof(struct cur_req) * engine->rmax, GFP_KERNEL);
+	if (!engine->cur_reqs)
+		return NULL;
 
 	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
 	spin_lock_init(&engine->queue_lock);
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index e29cd67f93c7..362134e226f4 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -18,13 +18,23 @@ 
 #include <crypto/skcipher.h>
 
 #define ENGINE_NAME_LEN	30
+
+/*
+ * struct cur_req - Represent a request to be processed
+ * @prepared:	Does the request was prepared
+ * @req:	The request to be processed
+ */
+struct cur_req {
+	bool				prepared;
+	struct crypto_async_request	*req;
+};
+
 /*
  * struct crypto_engine - crypto hardware engine
  * @name: the engine name
  * @idling: the engine is entering idle state
  * @busy: request pump is busy
  * @running: the engine is on working
- * @cur_req_prepared: current request is prepared
  * @list: link with the global crypto engine list
  * @queue_lock: spinlock to syncronise access to request queue
  * @queue: the crypto queue of the engine
@@ -38,14 +48,14 @@ 
  * @kworker: kthread worker struct for request pump
  * @pump_requests: work struct for scheduling work to the request pump
  * @priv_data: the engine private data
- * @cur_req: the current request which is on processing
+ * @rmax:	The number of request which can be processed in one batch
+ * @cur_reqs: 	A list for requests to be processed
  */
 struct crypto_engine {
 	char			name[ENGINE_NAME_LEN];
 	bool			idling;
 	bool			busy;
 	bool			running;
-	bool			cur_req_prepared;
 
 	struct list_head	list;
 	spinlock_t		queue_lock;
@@ -61,7 +71,8 @@  struct crypto_engine {
 	struct kthread_work             pump_requests;
 
 	void				*priv_data;
-	struct crypto_async_request	*cur_req;
+	int 				rmax;
+	struct cur_req 			*cur_reqs;
 };
 
 /*