diff mbox series

[v1,1/2] hw/rdma: Cosmetic change - no need for two sge arrays

Message ID 20200320143429.9490-2-yuval.shaia.ml@gmail.com (mailing list archive)
State New, archived
Headers show
Series hw/rdma: Last step in eliminating data-path processing | expand

Commit Message

Yuval Shaia March 20, 2020, 2:34 p.m. UTC
The function build_host_sge_array uses two sge arrays, one for input and
one for output.
Since the two arrays have the same size, the function can instead write
directly into the given source array (i.e. use it as an input/output argument).

Signed-off-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
---
 hw/rdma/rdma_backend.c | 40 ++++++++++++++++------------------------
 1 file changed, 16 insertions(+), 24 deletions(-)

Comments

Marcel Apfelbaum March 21, 2020, 4:50 p.m. UTC | #1
On 3/20/20 4:34 PM, Yuval Shaia wrote:
> The function build_host_sge_array uses two sge arrays, one for input and
> one for output.
> Since the size of the two arrays is the same, the function can write
> directly to the given source array (i.e. input/output argument).
>
> Signed-off-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
> ---
>   hw/rdma/rdma_backend.c | 40 ++++++++++++++++------------------------
>   1 file changed, 16 insertions(+), 24 deletions(-)
>
> diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
> index c346407cd3..b7ffbef9c0 100644
> --- a/hw/rdma/rdma_backend.c
> +++ b/hw/rdma/rdma_backend.c
> @@ -378,30 +378,25 @@ static void ah_cache_init(void)
>   }
>   
>   static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
> -                                struct ibv_sge *dsge, struct ibv_sge *ssge,
> -                                uint8_t num_sge, uint64_t *total_length)
> +                                struct ibv_sge *sge, uint8_t num_sge,
> +                                uint64_t *total_length)
>   {
>       RdmaRmMR *mr;
> -    int ssge_idx;
> +    int idx;
>   
> -    for (ssge_idx = 0; ssge_idx < num_sge; ssge_idx++) {
> -        mr = rdma_rm_get_mr(rdma_dev_res, ssge[ssge_idx].lkey);
> +    for (idx = 0; idx < num_sge; idx++) {
> +        mr = rdma_rm_get_mr(rdma_dev_res, sge[idx].lkey);
>           if (unlikely(!mr)) {
> -            rdma_error_report("Invalid lkey 0x%x", ssge[ssge_idx].lkey);
> -            return VENDOR_ERR_INVLKEY | ssge[ssge_idx].lkey;
> +            rdma_error_report("Invalid lkey 0x%x", sge[idx].lkey);
> +            return VENDOR_ERR_INVLKEY | sge[idx].lkey;
>           }
>   
>   #ifdef LEGACY_RDMA_REG_MR
> -        dsge->addr = (uintptr_t)mr->virt + ssge[ssge_idx].addr - mr->start;
> -#else
> -        dsge->addr = ssge[ssge_idx].addr;
> +        sge[idx].addr = (uintptr_t)mr->virt + sge[idx].addr - mr->start;
>   #endif
> -        dsge->length = ssge[ssge_idx].length;
> -        dsge->lkey = rdma_backend_mr_lkey(&mr->backend_mr);
> -
> -        *total_length += dsge->length;
> +        sge[idx].lkey = rdma_backend_mr_lkey(&mr->backend_mr);
>   
> -        dsge++;
> +        *total_length += sge[idx].length;
>       }
>   
>       return 0;
> @@ -484,7 +479,6 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev,
>                               void *ctx)
>   {
>       BackendCtx *bctx;
> -    struct ibv_sge new_sge[MAX_SGE];
>       uint32_t bctx_id;
>       int rc;
>       struct ibv_send_wr wr = {}, *bad_wr;
> @@ -518,7 +512,7 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev,
>   
>       rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);
>   
> -    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
> +    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
>                                 &backend_dev->rdma_dev_res->stats.tx_len);
>       if (rc) {
>           complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
> @@ -538,7 +532,7 @@ void rdma_backend_post_send(RdmaBackendDev *backend_dev,
>       wr.num_sge = num_sge;
>       wr.opcode = IBV_WR_SEND;
>       wr.send_flags = IBV_SEND_SIGNALED;
> -    wr.sg_list = new_sge;
> +    wr.sg_list = sge;
>       wr.wr_id = bctx_id;
>   
>       rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
> @@ -601,7 +595,6 @@ void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
>                               struct ibv_sge *sge, uint32_t num_sge, void *ctx)
>   {
>       BackendCtx *bctx;
> -    struct ibv_sge new_sge[MAX_SGE];
>       uint32_t bctx_id;
>       int rc;
>       struct ibv_recv_wr wr = {}, *bad_wr;
> @@ -635,7 +628,7 @@ void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
>   
>       rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);
>   
> -    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
> +    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
>                                 &backend_dev->rdma_dev_res->stats.rx_bufs_len);
>       if (rc) {
>           complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
> @@ -643,7 +636,7 @@ void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
>       }
>   
>       wr.num_sge = num_sge;
> -    wr.sg_list = new_sge;
> +    wr.sg_list = sge;
>       wr.wr_id = bctx_id;
>       rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
>       if (rc) {
> @@ -671,7 +664,6 @@ void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
>                                   uint32_t num_sge, void *ctx)
>   {
>       BackendCtx *bctx;
> -    struct ibv_sge new_sge[MAX_SGE];
>       uint32_t bctx_id;
>       int rc;
>       struct ibv_recv_wr wr = {}, *bad_wr;
> @@ -688,7 +680,7 @@ void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
>   
>       rdma_protected_gslist_append_int32(&srq->cqe_ctx_list, bctx_id);
>   
> -    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
> +    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
>                                 &backend_dev->rdma_dev_res->stats.rx_bufs_len);
>       if (rc) {
>           complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
> @@ -696,7 +688,7 @@ void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
>       }
>   
>       wr.num_sge = num_sge;
> -    wr.sg_list = new_sge;
> +    wr.sg_list = sge;
>       wr.wr_id = bctx_id;
>       rc = ibv_post_srq_recv(srq->ibsrq, &wr, &bad_wr);
>       if (rc) {


Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>

Thanks,
Marcel
diff mbox series

Patch

diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index c346407cd3..b7ffbef9c0 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -378,30 +378,25 @@  static void ah_cache_init(void)
 }
 
 static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
-                                struct ibv_sge *dsge, struct ibv_sge *ssge,
-                                uint8_t num_sge, uint64_t *total_length)
+                                struct ibv_sge *sge, uint8_t num_sge,
+                                uint64_t *total_length)
 {
     RdmaRmMR *mr;
-    int ssge_idx;
+    int idx;
 
-    for (ssge_idx = 0; ssge_idx < num_sge; ssge_idx++) {
-        mr = rdma_rm_get_mr(rdma_dev_res, ssge[ssge_idx].lkey);
+    for (idx = 0; idx < num_sge; idx++) {
+        mr = rdma_rm_get_mr(rdma_dev_res, sge[idx].lkey);
         if (unlikely(!mr)) {
-            rdma_error_report("Invalid lkey 0x%x", ssge[ssge_idx].lkey);
-            return VENDOR_ERR_INVLKEY | ssge[ssge_idx].lkey;
+            rdma_error_report("Invalid lkey 0x%x", sge[idx].lkey);
+            return VENDOR_ERR_INVLKEY | sge[idx].lkey;
         }
 
 #ifdef LEGACY_RDMA_REG_MR
-        dsge->addr = (uintptr_t)mr->virt + ssge[ssge_idx].addr - mr->start;
-#else
-        dsge->addr = ssge[ssge_idx].addr;
+        sge[idx].addr = (uintptr_t)mr->virt + sge[idx].addr - mr->start;
 #endif
-        dsge->length = ssge[ssge_idx].length;
-        dsge->lkey = rdma_backend_mr_lkey(&mr->backend_mr);
-
-        *total_length += dsge->length;
+        sge[idx].lkey = rdma_backend_mr_lkey(&mr->backend_mr);
 
-        dsge++;
+        *total_length += sge[idx].length;
     }
 
     return 0;
@@ -484,7 +479,6 @@  void rdma_backend_post_send(RdmaBackendDev *backend_dev,
                             void *ctx)
 {
     BackendCtx *bctx;
-    struct ibv_sge new_sge[MAX_SGE];
     uint32_t bctx_id;
     int rc;
     struct ibv_send_wr wr = {}, *bad_wr;
@@ -518,7 +512,7 @@  void rdma_backend_post_send(RdmaBackendDev *backend_dev,
 
     rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);
 
-    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
+    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
                               &backend_dev->rdma_dev_res->stats.tx_len);
     if (rc) {
         complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
@@ -538,7 +532,7 @@  void rdma_backend_post_send(RdmaBackendDev *backend_dev,
     wr.num_sge = num_sge;
     wr.opcode = IBV_WR_SEND;
     wr.send_flags = IBV_SEND_SIGNALED;
-    wr.sg_list = new_sge;
+    wr.sg_list = sge;
     wr.wr_id = bctx_id;
 
     rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
@@ -601,7 +595,6 @@  void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
                             struct ibv_sge *sge, uint32_t num_sge, void *ctx)
 {
     BackendCtx *bctx;
-    struct ibv_sge new_sge[MAX_SGE];
     uint32_t bctx_id;
     int rc;
     struct ibv_recv_wr wr = {}, *bad_wr;
@@ -635,7 +628,7 @@  void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
 
     rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);
 
-    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
+    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
                               &backend_dev->rdma_dev_res->stats.rx_bufs_len);
     if (rc) {
         complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
@@ -643,7 +636,7 @@  void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
     }
 
     wr.num_sge = num_sge;
-    wr.sg_list = new_sge;
+    wr.sg_list = sge;
     wr.wr_id = bctx_id;
     rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
     if (rc) {
@@ -671,7 +664,6 @@  void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
                                 uint32_t num_sge, void *ctx)
 {
     BackendCtx *bctx;
-    struct ibv_sge new_sge[MAX_SGE];
     uint32_t bctx_id;
     int rc;
     struct ibv_recv_wr wr = {}, *bad_wr;
@@ -688,7 +680,7 @@  void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
 
     rdma_protected_gslist_append_int32(&srq->cqe_ctx_list, bctx_id);
 
-    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
+    rc = build_host_sge_array(backend_dev->rdma_dev_res, sge, num_sge,
                               &backend_dev->rdma_dev_res->stats.rx_bufs_len);
     if (rc) {
         complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
@@ -696,7 +688,7 @@  void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
     }
 
     wr.num_sge = num_sge;
-    wr.sg_list = new_sge;
+    wr.sg_list = sge;
     wr.wr_id = bctx_id;
     rc = ibv_post_srq_recv(srq->ibsrq, &wr, &bad_wr);
     if (rc) {