
[v2,2/2] migration/rdma: advise prefetch write for ODP region

Message ID 20210823033358.3002-3-lizhijian@cn.fujitsu.com (mailing list archive)
State New, archived
Series: enable fsdax rdma migration

Commit Message

Li Zhijian Aug. 23, 2021, 3:33 a.m. UTC
A responder-side MR registered with ODP will send an RNR NAK back to
the requester when it hits a page fault:
---------
ibv_poll_cq wc.status=13 RNR retry counter exceeded!
ibv_poll_cq wrid=WRITE RDMA!
---------
ibv_advise_mr(3) helps make the pages present before the actual IO is
issued, so that the responder page-faults as little as possible.
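
For reference, the verbs call pattern boils down to roughly the following
standalone sketch (illustrative only, not part of this patch; the helper
name is made up, and error handling and deregistration are simplified):

#include <stdint.h>
#include <infiniband/verbs.h>

/* Register an ODP MR and hint the HCA to make its pages present for writes. */
static int register_and_prefetch_odp(struct ibv_pd *pd, void *buf, uint32_t len)
{
    struct ibv_mr *mr;
    struct ibv_sge sge;

    /* IBV_ACCESS_ON_DEMAND requests On-Demand Paging for this region */
    mr = ibv_reg_mr(pd, buf, len,
                    IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE |
                    IBV_ACCESS_ON_DEMAND);
    if (!mr) {
        return -1;
    }

    sge.addr   = (uintptr_t)buf;
    sge.length = len;
    sge.lkey   = mr->lkey;

    /* Prefetch-write hint: fault the pages in before any RDMA WRITE lands,
     * so the responder does not have to answer with an RNR NAK. */
    return ibv_advise_mr(pd, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE,
                         IBV_ADVISE_MR_FLAG_FLUSH, &sge, 1);
}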

Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>

---
V2: use IBV_ADVISE_MR_FLAG_FLUSH instead of IB_UVERBS_ADVISE_MR_FLAG_FLUSH
    and add Reviewed-by tag. # Marcel
---
 migration/rdma.c       | 40 ++++++++++++++++++++++++++++++++++++++++
 migration/trace-events |  1 +
 2 files changed, 41 insertions(+)

Comments

Zhijian Li (Fujitsu) Aug. 23, 2021, 8:42 a.m. UTC | #1
CCing Marcel


On 23/08/2021 11:33, Li Zhijian wrote:
> [...]

Patch

diff --git a/migration/rdma.c b/migration/rdma.c
index eb80431aae2..6c2cc3f617c 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -1133,6 +1133,30 @@  static bool rdma_support_odp(struct ibv_context *dev)
     return false;
 }
 
+/*
+ * Use ibv_advise_mr to avoid RNR NAK errors as far as possible.
+ * An MR registered with ODP on the responder side will send an RNR NAK
+ * back to the requester when it hits a page fault.
+ */
+static void qemu_rdma_advise_prefetch_mr(struct ibv_pd *pd, uint64_t addr,
+                                         uint32_t len,  uint32_t lkey,
+                                         const char *name, bool wr)
+{
+    int ret;
+    int advice = wr ? IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE :
+                 IBV_ADVISE_MR_ADVICE_PREFETCH;
+    struct ibv_sge sg_list = {.lkey = lkey, .addr = addr, .length = len};
+
+    ret = ibv_advise_mr(pd, advice,
+                        IBV_ADVISE_MR_FLAG_FLUSH, &sg_list, 1);
+    /* ignore the error */
+    if (ret) {
+        trace_qemu_rdma_advise_mr(name, len, addr, strerror(errno));
+    } else {
+        trace_qemu_rdma_advise_mr(name, len, addr, "successed");
+    }
+}
+
 static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
 {
     int i;
@@ -1156,6 +1180,15 @@  static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
                                local->block[i].local_host_addr,
                                local->block[i].length, access);
                 trace_qemu_rdma_register_odp_mr(local->block[i].block_name);
+
+                if (local->block[i].mr) {
+                    qemu_rdma_advise_prefetch_mr(rdma->pd,
+                                    (uintptr_t)local->block[i].local_host_addr,
+                                    local->block[i].length,
+                                    local->block[i].mr->lkey,
+                                    local->block[i].block_name,
+                                    true);
+                }
         }
 
         if (!local->block[i].mr) {
@@ -1255,6 +1288,13 @@  static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
             /* register ODP mr */
             block->pmr[chunk] = ibv_reg_mr(rdma->pd, chunk_start, len, access);
             trace_qemu_rdma_register_odp_mr(block->block_name);
+
+            if (block->pmr[chunk]) {
+                qemu_rdma_advise_prefetch_mr(rdma->pd, (uintptr_t)chunk_start,
+                                            len, block->pmr[chunk]->lkey,
+                                            block->block_name, rkey);
+
+            }
         }
     }
     if (!block->pmr[chunk]) {
diff --git a/migration/trace-events b/migration/trace-events
index 5f6aa580def..a8ae163707c 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -213,6 +213,7 @@  qemu_rdma_poll_other(const char *compstr, int64_t comp, int left) "other complet
 qemu_rdma_post_send_control(const char *desc) "CONTROL: sending %s.."
 qemu_rdma_register_and_get_keys(uint64_t len, void *start) "Registering %" PRIu64 " bytes @ %p"
 qemu_rdma_register_odp_mr(const char *name) "Try to register On-Demand Paging memory region: %s"
+qemu_rdma_advise_mr(const char *name, uint32_t len, uint64_t addr, const char *res) "Try to advise block %s prefetch at %" PRIu32 "@0x%" PRIx64 ": %s"
 qemu_rdma_registration_handle_compress(int64_t length, int index, int64_t offset) "Zapping zero chunk: %" PRId64 " bytes, index %d, offset %" PRId64
 qemu_rdma_registration_handle_finished(void) ""
 qemu_rdma_registration_handle_ram_blocks(void) ""