Message ID | 46f364ce7878b740e58bf44d3bed5fe23c64a260.1660152975.git.alibuda@linux.alibaba.com
---|---
State | Not Applicable
Series | net/smc: optimize the parallelism of SMC-R connections
On Thu, Aug 11, 2022 at 01:47:38AM +0800, D. Wythe wrote:
> From: "D. Wythe" <alibuda@linux.alibaba.com>
>
> Unlike smc_buf_create() and smcr_buf_unuse(), smcr_lgr_reg_rmbs() is
> exclusive when the assigned rmb_desc is not yet registered, but it can
> be executed in parallel once the rmb_desc has been registered, since it
> then only performs read semantics on it. Hence we cannot simply replace
> the write semaphore with a read semaphore.
>
> The idea here is that if the assigned rmb_desc is already registered,
> we use the read semaphore to protect the critical section; if it is not
> yet registered, we keep using the write semaphore to preserve its
> exclusivity.
>
> Thanks to the reusability of rmb_desc, this allows us to execute in
> parallel in most cases.
>
> Signed-off-by: D. Wythe <alibuda@linux.alibaba.com>
> ---
>  net/smc/af_smc.c | 19 +++++++++++++++++--
>  1 file changed, 17 insertions(+), 2 deletions(-)
>
> diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
> index 51b90e2..39dbf39 100644
> --- a/net/smc/af_smc.c
> +++ b/net/smc/af_smc.c
> @@ -516,10 +516,25 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link,
>  {
>  	struct smc_link_group *lgr = link->lgr;
>  	int i, rc = 0;
> +	bool slow = false;

Consider naming this do_slow? Also, please keep the declarations in
reverse Christmas tree order (longest line first).

>
>  	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
>  	if (rc)
>  		return rc;
> +
> +	down_read(&lgr->llc_conf_mutex);
> +	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
> +		if (!smc_link_active(&lgr->lnk[i]))
> +			continue;
> +		if (!rmb_desc->is_reg_mr[link->link_idx]) {
> +			up_read(&lgr->llc_conf_mutex);
> +			goto slow_path;
> +		}
> +	}
> +	/* mr register already */
> +	goto fast_path;
> +slow_path:
> +	slow = true;
>  	/* protect against parallel smc_llc_cli_rkey_exchange() and
>  	 * parallel smcr_link_reg_buf()
>  	 */
> @@ -531,7 +546,7 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link,
>  		if (rc)
>  			goto out;
>  	}
> -
> +fast_path:
>  	/* exchange confirm_rkey msg with peer */
>  	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
>  	if (rc) {
> @@ -540,7 +555,7 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link,
>  	}
>  	rmb_desc->is_conf_rkey = true;
>  out:
> -	up_write(&lgr->llc_conf_mutex);
> +	slow ? up_write(&lgr->llc_conf_mutex) : up_read(&lgr->llc_conf_mutex);
>  	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
>  	return rc;
>  }
> --
> 1.8.3.1
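For readers outside the SMC code base, below is a minimal userspace
sketch of the fast-path/slow-path locking pattern the patch implements.
It uses pthread_rwlock_t as a stand-in for the kernel rw_semaphore
(llc_conf_mutex), and mr_registered / reg_rmb() are hypothetical
placeholders for the per-link is_reg_mr[] checks and the registration
step; this is an illustration of the pattern, not the actual kernel
code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for lgr->llc_conf_mutex (an rw_semaphore in the kernel). */
static pthread_rwlock_t conf_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for rmb_desc->is_reg_mr[]; hypothetical, for illustration. */
static bool mr_registered;

static int reg_rmb(void)
{
	/* Fast path: take the lock shared. If the buffer is already
	 * registered, everything that follows only reads shared state,
	 * so many connections can run this section in parallel.
	 */
	pthread_rwlock_rdlock(&conf_lock);
	if (mr_registered)
		goto fast_path;

	/* Slow path: drop the read lock and retake it exclusively so
	 * registration itself stays serialized. Re-check after the
	 * upgrade, since another thread may have registered meanwhile.
	 */
	pthread_rwlock_unlock(&conf_lock);
	pthread_rwlock_wrlock(&conf_lock);
	if (!mr_registered)
		mr_registered = true;	/* stands in for smcr_link_reg_buf() */
fast_path:
	/* ... the confirm_rkey exchange with the peer happens here ... */
	pthread_rwlock_unlock(&conf_lock);
	return 0;
}

int main(void)
{
	reg_rmb();	/* first call takes the slow (write-locked) path */
	reg_rmb();	/* later calls stay on the fast (read-locked) path */
	printf("registered=%d\n", mr_registered);
	return 0;
}

One difference from the kernel: pthread_rwlock_unlock() releases
whichever side the caller holds, so the sketch does not need the
patch's slow flag; the kernel's up_read() and up_write() are distinct
calls, which is why the patch tracks which side it took and picks the
matching release at the out: label.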