[8/8] IB/srp: Drain the send queue before destroying a QP

Message ID 20170210235611.3243-9-bart.vanassche@sandisk.com (mailing list archive)
State Superseded

Commit Message

Bart Van Assche Feb. 10, 2017, 11:56 p.m. UTC
A quote from the IB spec:

However, if the Consumer does not wait for the Affiliated Asynchronous
Last WQE Reached Event, then WQE and Data Segment leakage may occur.
Therefore, it is good programming practice to tear down a QP that is
associated with an SRQ by using the following process:
* Put the QP in the Error State;
* wait for the Affiliated Asynchronous Last WQE Reached Event;
* either:
  * drain the CQ by invoking the Poll CQ verb and either wait for CQ
    to be empty or the number of Poll CQ operations has exceeded CQ
    capacity size; or
  * post another WR that completes on the same CQ and wait for this WR to return as a WC;
* and then invoke a Destroy QP or Reset QP.
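
In kernel verbs terms this procedure maps onto ib_drain_qp(); a minimal sketch of the resulting teardown order (helper name mine, error handling omitted):

static void example_teardown_qp(struct ib_qp *qp)
{
	/*
	 * ib_drain_qp() moves the QP to the Error state, posts one marker
	 * WR per work queue and waits until the marker's flush completion
	 * has been observed, i.e. until all earlier WQEs have been reaped.
	 */
	ib_drain_qp(qp);

	/* Only after the drain is it safe to destroy the QP. */
	ib_destroy_qp(qp);
}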

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Israel Rukshin <israelr@mellanox.com>
Cc: Max Gurtovoy <maxg@mellanox.com>
Cc: Laurence Oberman <loberman@redhat.com>
---
 drivers/infiniband/ulp/srp/ib_srp.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

Comments

Robert LeBlanc Feb. 11, 2017, 12:07 a.m. UTC | #1
On Fri, Feb 10, 2017 at 4:56 PM, Bart Van Assche
<bart.vanassche@sandisk.com> wrote:
> A quote from the IB spec:
>
> However, if the Consumer does not wait for the Affiliated Asynchronous
> Last WQE Reached Event, then WQE and Data Segment leakage may occur.
> Therefore, it is good programming practice to tear down a QP that is
> associated with an SRQ by using the following process:
> * Put the QP in the Error State;
> * wait for the Affiliated Asynchronous Last WQE Reached Event;
> * either:
>   * drain the CQ by invoking the Poll CQ verb and either wait for CQ
>     to be empty or the number of Poll CQ operations has exceeded CQ
>     capacity size; or
>   * post another WR that completes on the same CQ and wait for this WR to return as a WC;
> * and then invoke a Destroy QP or Reset QP.
>
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Israel Rukshin <israelr@mellanox.com>
> Cc: Max Gurtovoy <maxg@mellanox.com>
> Cc: Laurence Oberman <loberman@redhat.com>
> ---
>  drivers/infiniband/ulp/srp/ib_srp.c | 19 ++++++++++++++-----
>  1 file changed, 14 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
> index 2f85255d2aca..b50733910f7e 100644
> --- a/drivers/infiniband/ulp/srp/ib_srp.c
> +++ b/drivers/infiniband/ulp/srp/ib_srp.c
> @@ -471,9 +471,13 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
>   * completion handler can access the queue pair while it is
>   * being destroyed.
>   */
> -static void srp_destroy_qp(struct ib_qp *qp)
> +static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
>  {
> -       ib_drain_rq(qp);
> +       spin_lock_irq(&ch->lock);
> +       ib_process_cq_direct(ch->send_cq, -1);
> +       spin_unlock_irq(&ch->lock);
> +
> +       ib_drain_qp(qp);
>         ib_destroy_qp(qp);
>  }
>
> @@ -547,7 +551,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
>         }
>
>         if (ch->qp)
> -               srp_destroy_qp(ch->qp);
> +               srp_destroy_qp(ch, ch->qp);
>         if (ch->recv_cq)
>                 ib_free_cq(ch->recv_cq);
>         if (ch->send_cq)
> @@ -571,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
>         return 0;
>
>  err_qp:
> -       srp_destroy_qp(qp);
> +       srp_destroy_qp(ch, qp);
>
>  err_send_cq:
>         ib_free_cq(send_cq);
> @@ -614,7 +618,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
>                         ib_destroy_fmr_pool(ch->fmr_pool);
>         }
>
> -       srp_destroy_qp(ch->qp);
> +       srp_destroy_qp(ch, ch->qp);
>         ib_free_cq(ch->send_cq);
>         ib_free_cq(ch->recv_cq);
>
> @@ -1827,6 +1831,11 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
>         return iu;
>  }
>
> +/*
> + * Note: if this function is called from inside ib_drain_sq() then it will

Don't you mean outside of ib_drain_sq?

> + * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
> + * with status IB_WC_SUCCESS then that's a bug.
> + */
>  static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
>  {
>         struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
> --
> 2.11.0
>

Sagi,

Does something like this need to happen for iSER as well? Maybe it
could help with the D state problem?

----------------
Robert LeBlanc
PGP Fingerprint 79A2 9CA4 6CC4 45DD A904  C70E E654 3BB2 FA62 B9F1
Bart Van Assche Feb. 11, 2017, 12:13 a.m. UTC | #2
On Fri, 2017-02-10 at 17:07 -0700, Robert LeBlanc wrote:
> > +/*
> > + * Note: if this function is called from inside ib_drain_sq() then it will
> 
> Don't you mean outside of ib_drain_sq?

I meant inside.

Bart.
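
For reference, the completion handler under discussion looks roughly like this in the 4.10-era driver (comments mine): when ib_drain_sq() flushes the send queue, every dequeued WQE must carry a flush error status, so the handler bails out before touching ch->free_tx and therefore does not need ch->lock:

static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		/* Flushed WQEs from a drain take this early return. */
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	/* Success completions are only polled under ch->lock, so this is safe. */
	list_add(&iu->list, &ch->free_tx);
}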
Leon Romanovsky Feb. 12, 2017, 5:19 p.m. UTC | #3
On Fri, Feb 10, 2017 at 03:56:11PM -0800, Bart Van Assche wrote:
> A quote from the IB spec:
>
> However, if the Consumer does not wait for the Affiliated Asynchronous
> Last WQE Reached Event, then WQE and Data Segment leakage may occur.
> Therefore, it is good programming practice to tear down a QP that is
> associated with an SRQ by using the following process:
> * Put the QP in the Error State;
> * wait for the Affiliated Asynchronous Last WQE Reached Event;
> * either:
>   * drain the CQ by invoking the Poll CQ verb and either wait for CQ
>     to be empty or the number of Poll CQ operations has exceeded CQ
>     capacity size; or
>   * post another WR that completes on the same CQ and wait for this WR to return as a WC;
> * and then invoke a Destroy QP or Reset QP.
>
> Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> Cc: Christoph Hellwig <hch@lst.de>
> Cc: Israel Rukshin <israelr@mellanox.com>
> Cc: Max Gurtovoy <maxg@mellanox.com>
> Cc: Laurence Oberman <loberman@redhat.com>
> ---
>  drivers/infiniband/ulp/srp/ib_srp.c | 19 ++++++++++++++-----
>  1 file changed, 14 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
> index 2f85255d2aca..b50733910f7e 100644
> --- a/drivers/infiniband/ulp/srp/ib_srp.c
> +++ b/drivers/infiniband/ulp/srp/ib_srp.c
> @@ -471,9 +471,13 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
>   * completion handler can access the queue pair while it is
>   * being destroyed.
>   */
> -static void srp_destroy_qp(struct ib_qp *qp)
> +static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
>  {
> -	ib_drain_rq(qp);
> +	spin_lock_irq(&ch->lock);
> +	ib_process_cq_direct(ch->send_cq, -1);

I see that you are already using "-1" in your code, but the comment above
ib_process_cq_direct() states that no new code should use "-1".

 61  * Note: for compatibility reasons -1 can be passed in %budget for unlimited
 62  * polling.  Do not use this feature in new code, it will be removed soon.
 63  */
 64 int ib_process_cq_direct(struct ib_cq *cq, int budget)

Thanks
Laurence Oberman Feb. 12, 2017, 6:02 p.m. UTC | #4
----- Original Message -----
> From: "Leon Romanovsky" <leon@kernel.org>
> To: "Bart Van Assche" <bart.vanassche@sandisk.com>
> Cc: "Doug Ledford" <dledford@redhat.com>, linux-rdma@vger.kernel.org, "Christoph Hellwig" <hch@lst.de>, "Israel
> Rukshin" <israelr@mellanox.com>, "Max Gurtovoy" <maxg@mellanox.com>, "Laurence Oberman" <loberman@redhat.com>
> Sent: Sunday, February 12, 2017 12:19:28 PM
> Subject: Re: [PATCH 8/8] IB/srp: Drain the send queue before destroying a QP
> 
> On Fri, Feb 10, 2017 at 03:56:11PM -0800, Bart Van Assche wrote:
> > A quote from the IB spec:
> >
> > However, if the Consumer does not wait for the Affiliated Asynchronous
> > Last WQE Reached Event, then WQE and Data Segment leakage may occur.
> > Therefore, it is good programming practice to tear down a QP that is
> > associated with an SRQ by using the following process:
> > * Put the QP in the Error State;
> > * wait for the Affiliated Asynchronous Last WQE Reached Event;
> > * either:
> >   * drain the CQ by invoking the Poll CQ verb and either wait for CQ
> >     to be empty or the number of Poll CQ operations has exceeded CQ
> >     capacity size; or
> >   * post another WR that completes on the same CQ and wait for this WR to
> >   return as a WC;
> > * and then invoke a Destroy QP or Reset QP.
> >
> > Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
> > Cc: Christoph Hellwig <hch@lst.de>
> > Cc: Israel Rukshin <israelr@mellanox.com>
> > Cc: Max Gurtovoy <maxg@mellanox.com>
> > Cc: Laurence Oberman <loberman@redhat.com>
> > ---
> >  drivers/infiniband/ulp/srp/ib_srp.c | 19 ++++++++++++++-----
> >  1 file changed, 14 insertions(+), 5 deletions(-)
> >
> > diff --git a/drivers/infiniband/ulp/srp/ib_srp.c
> > b/drivers/infiniband/ulp/srp/ib_srp.c
> > index 2f85255d2aca..b50733910f7e 100644
> > --- a/drivers/infiniband/ulp/srp/ib_srp.c
> > +++ b/drivers/infiniband/ulp/srp/ib_srp.c
> > @@ -471,9 +471,13 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct
> > srp_target_port *target)
> >   * completion handler can access the queue pair while it is
> >   * being destroyed.
> >   */
> > -static void srp_destroy_qp(struct ib_qp *qp)
> > +static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
> >  {
> > -	ib_drain_rq(qp);
> > +	spin_lock_irq(&ch->lock);
> > +	ib_process_cq_direct(ch->send_cq, -1);
> 
> I see that you are already using "-1" in your code, but the comment above
> ib_process_cq_direct() states that no new code should use "-1".
> 
>  61  * Note: for compatibility reasons -1 can be passed in %budget for
>  unlimited
>  62  * polling.  Do not use this feature in new code, it will be removed
>  soon.
>  63  */
>  64 int ib_process_cq_direct(struct ib_cq *cq, int budget)
> 
> Thanks
> 

Hello Bart

I took latest for-next from your git tree and started the first set of tests.

I bumped into this very quickly, but I am only running the new code on the client.
The server has not been updated.

On the client I see this after starting a single write thread to and XFS on on eof the mpaths.
Given its in ib_strain figured I would let you know now.


[  850.862430] scsi host1: ib_srp: Out of MRs (mr_per_cmd = 1)
[  850.865203] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f3d94a30
[  850.941454] scsi host1: ib_srp: Failed to map data (-12)
[  860.990411] mlx5_0:dump_cqe:262:(pid 1103): dump error cqe
[  861.019162] 00000000 00000000 00000000 00000000
[  861.042085] 00000000 00000000 00000000 00000000
[  861.066567] 00000000 00000000 00000000 00000000
[  861.092164] 00000000 0f007806 2500002a cefe87d1
[  861.117091] ------------[ cut here ]------------
[  861.143141] WARNING: CPU: 27 PID: 1103 at drivers/infiniband/core/verbs.c:1959 __ib_drain_sq+0x1bb/0x1c0 [ib_core]
[  861.202208] IB_POLL_DIRECT poll_ctx not supported for drain
[  861.235179] Modules linked in: dm_service_time xt_CHECKSUM ipt_MASQUERADE nf_nat_masquerade_ipv4 tun ip6t_rpfilter ipt_REJECT nf_reject_ipv4 ip6t_REJECT nf_reject_ipv6 xt_conntrack ip_set nfnetlink ebtable_nat ebtable_broute bridge stp llc ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat rpcrdma nf_conntrack ib_isert iscsi_target_mod iptable_mangle iptable_security iptable_raw ebtable_filter ib_iser ebtables libiscsi ip6table_filter ip6_tables scsi_transport_iscsi iptable_filter target_core_mod ib_srp scsi_transport_srp ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm mlx5_ib ib_core intel_powerclamp coretemp kvm_intel kvm irqbypass crct10dif_pclmul crc32_pclmul ghash_clmulni_intel
[  861.646587]  pcbc aesni_intel crypto_simd ipmi_ssif glue_helper ipmi_si cryptd iTCO_wdt gpio_ich ipmi_devintf iTCO_vendor_support pcspkr hpwdt hpilo pcc_cpufreq sg ipmi_msghandler acpi_power_meter i7core_edac acpi_cpufreq shpchp edac_core lpc_ich nfsd auth_rpcgss nfs_acl lockd grace sunrpc dm_multipath ip_tables xfs libcrc32c amdkfd amd_iommu_v2 radeon i2c_algo_bit drm_kms_helper syscopyarea sd_mod sysfillrect sysimgblt fb_sys_fops ttm mlx5_core drm ptp fjes hpsa crc32c_intel serio_raw i2c_core pps_core bnx2 devlink scsi_transport_sas dm_mirror dm_region_hash dm_log dm_mod [last unloaded: ib_srpt]
[  861.943997] CPU: 27 PID: 1103 Comm: kworker/27:2 Tainted: G          I     4.10.0-rc7+ #1
[  861.989476] Hardware name: HP ProLiant DL380 G7, BIOS P67 08/16/2015
[  862.024833] Workqueue: events_long srp_reconnect_work [scsi_transport_srp]
[  862.063004] Call Trace:
[  862.076516]  dump_stack+0x63/0x87
[  862.094841]  __warn+0xd1/0xf0
[  862.112164]  warn_slowpath_fmt+0x5f/0x80
[  862.134013]  ? mlx5_poll_one+0x59/0xa40 [mlx5_ib]
[  862.161124]  __ib_drain_sq+0x1bb/0x1c0 [ib_core]
[  862.187702]  ib_drain_sq+0x25/0x30 [ib_core]
[  862.212168]  ib_drain_qp+0x12/0x30 [ib_core]
[  862.238138]  srp_destroy_qp+0x47/0x60 [ib_srp]
[  862.264155]  srp_create_ch_ib+0x26f/0x5f0 [ib_srp]
[  862.291646]  ? scsi_done+0x21/0x70
[  862.312392]  ? srp_finish_req+0x93/0xb0 [ib_srp]
[  862.338654]  srp_rport_reconnect+0xf0/0x1f0 [ib_srp]
[  862.366274]  srp_reconnect_rport+0xca/0x220 [scsi_transport_srp]
[  862.400756]  srp_reconnect_work+0x44/0xd1 [scsi_transport_srp]
[  862.434277]  process_one_work+0x165/0x410
[  862.456198]  worker_thread+0x137/0x4c0
[  862.476973]  kthread+0x101/0x140
[  862.493935]  ? rescuer_thread+0x3b0/0x3b0
[  862.516800]  ? kthread_park+0x90/0x90
[  862.537396]  ? do_syscall_64+0x67/0x180
[  862.558477]  ret_from_fork+0x2c/0x40
[  862.578161] ---[ end trace 2a6c2779f0a2d28f ]---
[  864.274137] scsi host1: ib_srp: reconnect succeeded
[  864.306836] scsi host1: ib_srp: Out of MRs (mr_per_cmd = 1)
[  864.310916] mlx5_0:dump_cqe:262:(pid 13776): dump error cqe
[  864.310917] 00000000 00000000 00000000 00000000
[  864.310921] 00000000 00000000 00000000 00000000
[  864.310922] 00000000 00000000 00000000 00000000
[  864.310922] 00000000 0f007806 25000032 00044cd0
[  864.310928] scsi host1: ib_srp: failed FAST REG status memory management operation error (6) for CQE ffff880b94268078
[  864.527890] scsi host1: ib_srp: Failed to map data (-12)
[  876.101124] scsi host1: ib_srp: reconnect succeeded
[  876.133923] scsi host1: ib_srp: Out of MRs (mr_per_cmd = 1)
[  876.135014] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff880bf1939130
[  876.210311] scsi host1: ib_srp: Failed to map data (-12)
[  876.239985] mlx5_0:dump_cqe:262:(pid 5945): dump error cqe
[  876.270855] 00000000 00000000 00000000 00000000
[  876.296525] 00000000 00000000 00000000 00000000
[  876.322500] 00000000 00000000 00000000 00000000
[  876.348519] 00000000 0f007806 2500003a 0080e1d0
[  887.784981] scsi host1: ib_srp: reconnect succeeded
[  887.819808] scsi host1: ib_srp: Out of MRs (mr_per_cmd = 1)
[  887.851777] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff880bf1939130
[  887.898850] scsi host1: ib_srp: Failed to map data (-12)
[  887.928647] mlx5_0:dump_cqe:262:(pid 7327): dump error cqe
[  887.959938] 00000000 00000000 00000000 00000000
[  887.985041] 00000000 00000000 00000000 00000000
[  888.010619] 00000000 00000000 00000000 00000000
[  888.035601] 00000000 0f007806 25000042 008099d0
[  899.546781] scsi host1: ib_srp: reconnect succeeded
[  899.580758] scsi host1: ib_srp: Out of MRs (mr_per_cmd = 1)
[  899.611289] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff880bf1939130
[  899.658289] scsi host1: ib_srp: Failed to map data (-12)
[  899.687219] mlx5_0:dump_cqe:262:(pid 7327): dump error cqe
[  899.718736] 00000000 00000000 00000000 00000000
[  899.744137] 00000000 00000000 00000000 00000000
[  899.769206] 00000000 00000000 00000000 00000000
[  899.795217] 00000000 0f007806 2500004a 008091d0
[  911.343869] scsi host1: ib_srp: reconnect succeeded
[  911.376684] scsi host1: ib_srp: Out of MRs (mr_per_cmd = 1)
[  911.407755] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff880bf1939130
[  911.454474] scsi host1: ib_srp: Failed to map data (-12)
[  911.484279] mlx5_0:dump_cqe:262:(pid 7327): dump error cqe
[  911.514784] 00000000 00000000 00000000 00000000
[  911.540251] 00000000 00000000 00000000 00000000
[  911.564841] 00000000 00000000 00000000 00000000
[  911.590743] 00000000 0f007806 25000052 008089d0
[  923.066748] scsi host1: ib_srp: reconnect succeeded
[  923.099656] scsi host1: ib_srp: Out of MRs (mr_per_cmd = 1)
[  923.131825] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff880bf1939130
[  923.179514] scsi host1: ib_srp: Failed to map data (-12)
[  923.209307] mlx5_0:dump_cqe:262:(pid 7327): dump error cqe
[  923.239986] 00000000 00000000 00000000 00000000
[  923.265419] 00000000 00000000 00000000 00000000
[  923.290102] 00000000 00000000 00000000 00000000
[  923.315120] 00000000 0f007806 2500005a 00c4d4d0
[  934.839336] scsi host1: ib_srp: reconnect succeeded
[  934.874582] scsi host1: ib_srp: Out of MRs (mr_per_cmd = 1)
[  934.906298] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff880bf1939130
[  934.953712] scsi host1: ib_srp: Failed to map data (-12)
[  934.983829] mlx5_0:dump_cqe:262:(pid 7327): dump error cqe
[  935.015371] 00000000 00000000 00000000 00000000
[  935.041544] 00000000 00000000 00000000 00000000
[  935.066883] 00000000 00000000 00000000 00000000
[  935.092755] 00000000 0f007806 25000062 00c4ecd0
[  946.610744] scsi host1: ib_srp: reconnect succeeded
[  946.644528] scsi host1: ib_srp: Out of MRs (mr_per_cmd = 1)
[  946.647935] mlx5_0:dump_cqe:262:(pid 752): dump error cqe
[  946.647936] 00000000 00000000 00000000 00000000
[  946.647937] 00000000 00000000 00000000 00000000
[  946.647937] 00000000 00000000 00000000 00000000
[  946.647938] 00000000 0f007806 2500006a 00c4e4d0
[  946.647940] scsi host1: ib_srp: failed FAST REG status memory management operation error (6) for CQE ffff880b94268c78
[  946.869439] scsi host1: ib_srp: Failed to map data (-12)

I will reset and restart to make sure this issue is repeatable.

Thanks
Laurence
Laurence Oberman Feb. 12, 2017, 6:06 p.m. UTC | #5
----- Original Message -----
> From: "Laurence Oberman" <loberman@redhat.com>
> To: "Leon Romanovsky" <leon@kernel.org>
> Cc: "Bart Van Assche" <bart.vanassche@sandisk.com>, "Doug Ledford" <dledford@redhat.com>, linux-rdma@vger.kernel.org,
> "Christoph Hellwig" <hch@lst.de>, "Israel Rukshin" <israelr@mellanox.com>, "Max Gurtovoy" <maxg@mellanox.com>
> Sent: Sunday, February 12, 2017 1:02:53 PM
> Subject: Re: [PATCH 8/8] IB/srp: Drain the send queue before destroying a QP
> 
> [snip: quote of Leon's message and the patch, duplicated in full above]
> 
> Hello Bart
> 
> I took latest for-next from your git tree and started the first set of tests.
> 
> I bumped into this very quickly, but I am only running the new code on the
> client.
> The server has not been updated.
> 
> On the client I see this after starting a single write thread to and XFS on
> on eof the mpaths.
> Given its in ib_strain figured I would let you know now.
> 
> [snip: dmesg log, identical to the log in the previous message]
> 
> I will reset and restart to make sure this issue is repeatable.
> 
> Thanks
> Laurence

Sorry for typos, should have been

On the client I see this after starting a single write thread to an XFS on one of the mpaths.
Given it's in ib_drain_cq, figured I would let you know now.

Thanks
Laurence
Bart Van Assche Feb. 12, 2017, 8:05 p.m. UTC | #6
On Sun, 2017-02-12 at 13:02 -0500, Laurence Oberman wrote:
> [  861.143141] WARNING: CPU: 27 PID: 1103 at drivers/infiniband/core/verbs.c:1959 __ib_drain_sq+0x1bb/0x1c0 [ib_core]
> [  861.202208] IB_POLL_DIRECT poll_ctx not supported for drain

Hello Laurence,

That warning has been removed by patch 7/8 of this series. Please double check
whether all eight patches have been applied properly.

Bart.
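
For context, the guard that fires here sits at the top of __ib_drain_sq() in drivers/infiniband/core/verbs.c; a trimmed sketch of the 4.10-era code (error handling shortened, comments mine):

static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr swr = {}, *bad_swr;

	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
		/* The guard whose warning appears in the log above. */
		WARN_ONCE(1, "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	swr.wr_cqe = &sdrain.cqe;
	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	/* Flush the send queue, then wait for the marker WR's completion. */
	ib_modify_qp(qp, &attr, IB_QP_STATE);
	ib_post_send(qp, &swr, &bad_swr);
	wait_for_completion(&sdrain.done);
}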
Bart Van Assche Feb. 12, 2017, 8:11 p.m. UTC | #7
On Sun, 2017-02-12 at 19:19 +0200, Leon Romanovsky wrote:
> On Fri, Feb 10, 2017 at 03:56:11PM -0800, Bart Van Assche wrote:
> > -static void srp_destroy_qp(struct ib_qp *qp)
> > +static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
> >  {
> > -	ib_drain_rq(qp);
> > +	spin_lock_irq(&ch->lock);
> > +	ib_process_cq_direct(ch->send_cq, -1);
> 
> I see that you are already using "-1" in your code, but the comment above
> ib_process_cq_direct() states that no new code should use "-1".
> 
>  61  * Note: for compatibility reasons -1 can be passed in %budget for unlimited
>  62  * polling.  Do not use this feature in new code, it will be removed soon.
>  63  */
>  64 int ib_process_cq_direct(struct ib_cq *cq, int budget)

Although it is possible to avoid passing -1 as 'budget' by passing a number
that is at least as large as the number of expected completions, it would
make it harder to verify the SRP initiator driver. So I propose to modify
the comment above ib_process_cq_direct().

Bart.
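
A bounded-budget variant of the new srp_destroy_qp() polling could look roughly like the following sketch; the choice of target->queue_size as the bound and the WARN are illustrative assumptions, not part of the posted patch:

/* Hypothetical: poll with an explicit budget instead of -1. The budget
 * must be at least as large as the number of send completions that can
 * still be pending; the send queue depth is a natural upper bound. */
int budget = target->queue_size;	/* assumed bound */
int n;

spin_lock_irq(&ch->lock);
n = ib_process_cq_direct(ch->send_cq, budget);
spin_unlock_irq(&ch->lock);

/* Consuming the whole budget would mean completions may remain. */
WARN_ON_ONCE(n == budget);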
Laurence Oberman Feb. 13, 2017, 2:07 a.m. UTC | #8
----- Original Message -----
> From: "Bart Van Assche" <Bart.VanAssche@sandisk.com>
> To: leon@kernel.org, loberman@redhat.com
> Cc: hch@lst.de, maxg@mellanox.com, israelr@mellanox.com, linux-rdma@vger.kernel.org, dledford@redhat.com
> Sent: Sunday, February 12, 2017 3:05:16 PM
> Subject: Re: [PATCH 8/8] IB/srp: Drain the send queue before destroying a QP
> 
> On Sun, 2017-02-12 at 13:02 -0500, Laurence Oberman wrote:
> > [  861.143141] WARNING: CPU: 27 PID: 1103 at
> > drivers/infiniband/core/verbs.c:1959 __ib_drain_sq+0x1bb/0x1c0 [ib_core]
> > [  861.202208] IB_POLL_DIRECT poll_ctx not supported for drain
> 
> Hello Laurence,
> 
> That warning has been removed by patch 7/8 of this series. Please double
> check
> whether all eight patches have been applied properly.
> 
> Bart.

Hello 
Just a heads up, working with Bart on this patch series.
We have stability issues with my tests in my MLX5 EDR-100 test bed. 
Thanks
Laurence
Laurence Oberman Feb. 13, 2017, 3:14 a.m. UTC | #9
----- Original Message -----
> From: "Laurence Oberman" <loberman@redhat.com>
> To: "Bart Van Assche" <Bart.VanAssche@sandisk.com>
> Cc: leon@kernel.org, hch@lst.de, maxg@mellanox.com, israelr@mellanox.com, linux-rdma@vger.kernel.org,
> dledford@redhat.com
> Sent: Sunday, February 12, 2017 9:07:16 PM
> Subject: Re: [PATCH 8/8] IB/srp: Drain the send queue before destroying a QP
> 
> [snip: quote of Bart's reply (#6), duplicated above]
> 
> Hello
> Just a heads up, working with Bart on this patch series.
> We have stability issues with my tests in my MLX5 EDR-100 test bed.
> Thanks
> Laurence
> 

I went back to Linus' latest tree for a baseline and we fail the same way.
This has none of the latest 8 patches applied so we will
have to figure out what broke this.

Don't forget that I tested all this recently with Bart's dma patch series,
and it's solid.

Will come back to this tomorrow and see what recently made it into Linus's tree by
checking back with Doug.

[  183.779175] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff880bd4270eb0
[  183.853047] 00000000 00000000 00000000 00000000
[  183.878425] 00000000 00000000 00000000 00000000
[  183.903243] 00000000 00000000 00000000 00000000
[  183.928518] 00000000 0f007806 2500002a ad9fafd1
[  198.538593] scsi host1: ib_srp: reconnect succeeded
[  198.573141] mlx5_0:dump_cqe:262:(pid 7369): dump error cqe
[  198.603037] 00000000 00000000 00000000 00000000
[  198.628884] 00000000 00000000 00000000 00000000
[  198.653961] 00000000 00000000 00000000 00000000
[  198.680021] 00000000 0f007806 25000032 00105dd0
[  198.705985] scsi host1: ib_srp: failed FAST REG status memory management operation error (6) for CQE ffff880b92860138
[  213.532848] scsi host1: ib_srp: reconnect succeeded
[  213.568828] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  227.579684] scsi host1: ib_srp: reconnect succeeded
[  227.616175] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  242.633925] scsi host1: ib_srp: reconnect succeeded
[  242.668160] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  257.127715] scsi host1: ib_srp: reconnect succeeded
[  257.165623] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  272.225762] scsi host1: ib_srp: reconnect succeeded
[  272.262570] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  286.350226] scsi host1: ib_srp: reconnect succeeded
[  286.386160] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  301.109365] scsi host1: ib_srp: reconnect succeeded
[  301.144930] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  315.910860] scsi host1: ib_srp: reconnect succeeded
[  315.944594] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  330.551052] scsi host1: ib_srp: reconnect succeeded
[  330.584552] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  344.998448] scsi host1: ib_srp: reconnect succeeded
[  345.032115] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  359.866731] scsi host1: ib_srp: reconnect succeeded
[  359.902114] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
..
..
[  373.113045] scsi host1: ib_srp: reconnect succeeded
[  373.149511] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  388.401469] fast_io_fail_tmo expired for SRP port-1:1 / host1.
[  388.589517] scsi host1: ib_srp: reconnect succeeded
[  388.623462] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  403.086893] scsi host1: ib_srp: reconnect succeeded
[  403.120876] scsi host1: ib_srp: failed RECV status WR flushed (5) for CQE ffff8817f2234c30
[  403.140401] mlx5_0:dump_cqe:262:(pid 749): dump error cqe
[  403.140402] 00000000 00000000 00000000 00000000
[  403.140402] 00000000 00000000 00000000 00000000
[  403.140403] 00000000 00000000 00000000 00000000
[  403.140403] 00

Leon Romanovsky Feb. 13, 2017, 6:07 a.m. UTC | #10
On Sun, Feb 12, 2017 at 08:11:53PM +0000, Bart Van Assche wrote:
> On Sun, 2017-02-12 at 19:19 +0200, Leon Romanovsky wrote:
> > On Fri, Feb 10, 2017 at 03:56:11PM -0800, Bart Van Assche wrote:
> > > -static void srp_destroy_qp(struct ib_qp *qp)
> > > +static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
> > >  {
> > > -	ib_drain_rq(qp);
> > > +	spin_lock_irq(&ch->lock);
> > > +	ib_process_cq_direct(ch->send_cq, -1);
> >
> > I see that you are already using "-1" in your code, but the comment above
> > ib_process_cq_direct() states that no new code should use "-1".
> >
> >  61  * Note: for compatibility reasons -1 can be passed in %budget for unlimited
> >  62  * polling.  Do not use this feature in new code, it will be removed soon.
> >  63  */
> >  64 int ib_process_cq_direct(struct ib_cq *cq, int budget)
>
> Although it is possible to avoid passing -1 as 'budget' by passing a number
> that is at least as large as the number of expected completions, it would
> make it harder to verify the SRP initiator driver. So I propose to modify
> the comment above ib_process_cq_direct().

I don't know.
It seems like the easiest approach is to change the comment, especially while
SRP is the only user of this call. However, the ability to properly calculate
the number of expected completions and compare against it when destroying the
QP is a valuable thing for correctness too.

Thanks

>
> Bart.
Laurence Oberman Feb. 13, 2017, 1:54 p.m. UTC | #11
----- Original Message -----
> From: "Laurence Oberman" <loberman@redhat.com>
> To: "Bart Van Assche" <Bart.VanAssche@sandisk.com>
> Cc: leon@kernel.org, hch@lst.de, maxg@mellanox.com, israelr@mellanox.com, linux-rdma@vger.kernel.org,
> dledford@redhat.com
> Sent: Sunday, February 12, 2017 10:14:53 PM
> Subject: Re: [PATCH 8/8] IB/srp: Drain the send queue before destroying a QP
> 
> [snip: quoted messages and dmesg log, duplicated in full above]
> 
Hello

Let me summarize where we are and how we got here.

The last kernel I tested with mlx5 and ib_srp was vmlinuz-4.10.0-rc4 with Bart's dma patches.
All tests passed.

I pulled Linus's tree and applied all 8 patches of the above series and we failed in the 
"failed FAST REG status memory management" area.

I applied only 7 of the 8 patches to Linus's tree because Bart and I thought patch 6 of the series 
may have been the catalyst.

This also failed.

Building from Bart's tree, which is based on 4.10.0-rc7, failed again.

This made me decide to baseline Linus's tree 4.10.0-rc7 and we fail.

So something has crept into 4.10.0-rc7 affecting this with mlx5 and ib_srp.

Thanks
Laurence
Leon Romanovsky Feb. 13, 2017, 2:17 p.m. UTC | #12
On Mon, Feb 13, 2017 at 08:54:53AM -0500, Laurence Oberman wrote:
>
> [snip: quoted messages and dmesg log, duplicated in full above]
>
> Hello
>
> Let me summarize where we are and how we got here.
>
> The last kernel I tested with mlx5 and ib_srp was vmlinuz-4.10.0-rc4 with Bart's dma patches.
> All tests passed.
>
> I pulled Linus's tree and applied all 8 patches of the above series and we failed in the
> "failed FAST REG status memory management" area.
>
> I applied only 7 of the 8 patches to Linus's tree because Bart and I thought patch 6 of the series
> may have been the catalyst.
>
> This also failed.
>
> Building from Bart's tree, which is based on 4.10.0-rc7, failed again.
>
> This made me decide to baseline Linus's tree 4.10.0-rc7 and we fail.
>
> So something has crept into 4.10.0-rc7 affecting this with mlx5 and ib_srp.

From the infiniband side:
➜  linux-rdma git:(queue-next) git log v4.10-rc4...v4.10-rc7 -- drivers/inifiniband |wc
      0       0       0

From the eth side, nothing suspicious either:
➜  linux-rdma git:(queue-next) git l v4.10-rc4...v4.10-rc7 -- drivers/net/ethernet/mellanox/mlx5
d15118af2683 net/mlx5e: Check ets capability before ets query FW command
a100ff3eef19 net/mlx5e: Fix update of hash function/key via ethtool
1d3398facd08 net/mlx5e: Modify TIRs hash only when it's needed
3e621b19b0bb net/mlx5e: Support TC encapsulation offloads with upper devices
5bae8c031053 net/mlx5: E-Switch, Re-enable RoCE on mode change only after FDB destroy
5403dc703ff2 net/mlx5: E-Switch, Err when retrieving steering name-space fails
eff596da4878 net/mlx5: Return EOPNOTSUPP when failing to get steering name-space
9eb7892351a3 net/mlx5: Change ENOTSUPP to EOPNOTSUPP
e048fc50d7bd net/mlx5e: Do not recycle pages from emergency reserve
ad05df399f33 net/mlx5e: Remove unused variable
639e9e94160e net/mlx5e: Remove unnecessary checks when setting num channels
abeffce90c7f net/mlx5e: Fix a -Wmaybe-uninitialized warning


>
> Thanks
> Laurence
Laurence Oberman Feb. 13, 2017, 2:24 p.m. UTC | #13
----- Original Message -----
> From: "Leon Romanovsky" <leon@kernel.org>
> To: "Laurence Oberman" <loberman@redhat.com>
> Cc: "Bart Van Assche" <Bart.VanAssche@sandisk.com>, hch@lst.de, maxg@mellanox.com, israelr@mellanox.com,
> linux-rdma@vger.kernel.org, dledford@redhat.com
> Sent: Monday, February 13, 2017 9:17:24 AM
> Subject: Re: [PATCH 8/8] IB/srp: Drain the send queue before destroying a QP
> 
> On Mon, Feb 13, 2017 at 08:54:53AM -0500, Laurence Oberman wrote:
> > [...]
> > So something has crept into 4.10.0-rc7 affecting this with mlx5 and ib_srp.
>
> [v4.10-rc4...v4.10-rc7 commit scan trimmed; see above: nothing suspicious in
> drivers/infiniband or drivers/net/ethernet/mellanox/mlx5]

Hi Leon, 
Yep, I also looked for suspicious outliers here and did not see any.

I guess I will have to start bisecting.
I will start with rc5, if that fails will bisect between rc4 and rc5, as we know rc4 was fine.
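
The bisect workflow would look roughly like this (a sketch; assume v4.10-rc4
as the last known-good tag and v4.10-rc7 as the first known-bad one):

  git bisect start
  git bisect bad v4.10-rc7
  git bisect good v4.10-rc4
  # build and boot the candidate, run the mlx5/ib_srp tests, then mark it:
  git bisect good          # or: git bisect bad
  # repeat until a single commit remains, then:
  git bisect reset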

I did re-run tests on rc4 last night and I was stable.

Thanks
Laurence
Laurence Oberman Feb. 13, 2017, 4:12 p.m. UTC | #14
----- Original Message -----
> From: "Laurence Oberman" <loberman@redhat.com>
> To: "Leon Romanovsky" <leon@kernel.org>
> Cc: "Bart Van Assche" <Bart.VanAssche@sandisk.com>, hch@lst.de, maxg@mellanox.com, israelr@mellanox.com,
> linux-rdma@vger.kernel.org, dledford@redhat.com
> Sent: Monday, February 13, 2017 9:24:01 AM
> Subject: Re: [PATCH 8/8] IB/srp: Drain the send queue before destroying a QP
> [...]
> I guess I will have to start bisecting.
> I will start with rc5, if that fails will bisect between rc4 and rc5, as we
> know rc4 was fine.
> [...]

OK, so 4.10.0-rc5 is fine, 4.10.0-rc6 fails, so will start bisecting.
Unless one of you thinks you know what may be causing this in rc6.
This will take time so will come back to the list once I have it isolated.

Thanks
Laurence
Laurence Oberman Feb. 13, 2017, 4:47 p.m. UTC | #15
----- Original Message -----
> From: "Laurence Oberman" <loberman@redhat.com>
> To: "Leon Romanovsky" <leon@kernel.org>
> Cc: "Bart Van Assche" <Bart.VanAssche@sandisk.com>, hch@lst.de, maxg@mellanox.com, israelr@mellanox.com,
> linux-rdma@vger.kernel.org, dledford@redhat.com
> Sent: Monday, February 13, 2017 11:12:55 AM
> Subject: Re: [PATCH 8/8] IB/srp: Drain the send queue before destroying a QP
> [...]
> OK, so 4.10.0-rc5 is fine, 4.10.0-rc6 fails, so will start bisecting.
> Unless one of you thinks you know what may be causing this in rc6.
> This will take time so will come back to the list once I have it isolated.

The bisect has 8 possible kernel builds covering 200+ changes; I started the first one.

Thanks
Laurence
Laurence Oberman Feb. 14, 2017, 3:02 a.m. UTC | #16
Hello Bart

The following 7 of 8 patches were applied to Linus's latest tree.

However this required first reverting 

commit ad8e66b4a80182174f73487ed25fd2140cf43361
Author: Israel Rukshin <israelr@mellanox.com>
Date:   Wed Dec 28 12:48:28 2016 +0200

See my other email regarding why the above needed to be reverted.
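
Roughly, that sequence would look like this (a sketch; the patch file names
are hypothetical):

  git revert ad8e66b4a80182174f73487ed25fd2140cf43361
  git am 0001-*.patch 0002-*.patch 0003-*.patch 0004-*.patch \
         0005-*.patch 0007-*.patch 0008-*.patch   # patch 6 of the series skipped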

All tests passed in my mlx5 EDR-100 test bed for the ib-srp/mlx5 tests.

4.10.0-rc8.bart+

The revert of the above meant I did not apply and test patch 6 of the series
IB/srp: Use the IB_DEVICE_SG_GAPS_REG HCA feature if supported

  IB/srp: Avoid that duplicate responses trigger a kernel bug
  IB/srp: Fix race conditions related to task management
  IB/srp: Document locking conventions
  IB/srp: Make a diagnostic message more informative
  IB/srp: Improve an error path
  *** Not applied and not tested IB/srp: Use the IB_DEVICE_SG_GAPS_REG HCA feature if supported
  IB/core: Add support for draining IB_POLL_DIRECT completion queues
  IB/srp: Drain the send queue before destroying a QP

For the series except patch 6

Tested-by:     Laurence Oberman <loberman@redhat.com>
Bart Van Assche Feb. 14, 2017, 5:18 p.m. UTC | #17
On Mon, 2017-02-13 at 22:02 -0500, Laurence Oberman wrote:
> The following 7 of 8 patches were applied to Linus's latest tree.
> [...]
> For the series except patch 6
> 
> Tested-by:     Laurence Oberman <loberman@redhat.com>

Hello Laurence,

Thank you for the testing. However, reverting commit ad8e66b4a801 without
making any further changes is not acceptable because it would reintroduce
the SG-list mapping problem addressed by that patch. Can you test the
srp-initiator-for-next branch from my github repository against mlx5 (commit
8dca762deab6)? It passes my tests against mlx4. The patches on that branch
are:

Bart Van Assche (8):
      IB/SRP: Avoid using IB_MR_TYPE_SG_GAPS
      IB/srp: Avoid that duplicate responses trigger a kernel bug
      IB/srp: Fix race conditions related to task management
      IB/srp: Document locking conventions
      IB/srp: Make a diagnostic message more informative
      IB/srp: Improve an error path
      IB/core: Add support for draining IB_POLL_DIRECT completion queues
      IB/srp: Drain the send queue before destroying a QP
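
Pulling that branch would look something like this (a minimal sketch; the
remote URL is an assumption):

  git remote add bart https://github.com/bvanassche/linux   # URL assumed
  git fetch bart
  git checkout -b srp-test bart/srp-initiator-for-next
  git log --oneline -1                   # expect 8dca762deab6 at the head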

Thanks,

Bart.
Laurence Oberman Feb. 14, 2017, 5:22 p.m. UTC | #18
----- Original Message -----
> From: "Bart Van Assche" <Bart.VanAssche@sandisk.com>
> To: leon@kernel.org, loberman@redhat.com
> Cc: hch@lst.de, maxg@mellanox.com, israelr@mellanox.com, linux-rdma@vger.kernel.org, dledford@redhat.com
> Sent: Tuesday, February 14, 2017 12:18:11 PM
> Subject: Re:   [PATCH 0/8] IB/srp bug fixes
> 
> [...]
> Thank you for the testing. However, reverting commit ad8e66b4a801 without
> making any further changes is not acceptable because it would reintroduce
> the SG-list mapping problem addressed by that patch. Can you test the
> srp-initiator-for-next branch from my github repository against mlx5 (commit
> 8dca762deab6)? It passes my tests against mlx4.
> [...]

Hello Bart. Understood, I will pull and test this today.
Thank you for your assistance.

Regards
Laurence
Laurence Oberman Feb. 14, 2017, 6:47 p.m. UTC | #19
----- Original Message -----
> From: "Bart Van Assche" <Bart.VanAssche@sandisk.com>
> To: leon@kernel.org, loberman@redhat.com
> Cc: hch@lst.de, maxg@mellanox.com, israelr@mellanox.com, linux-rdma@vger.kernel.org, dledford@redhat.com
> Sent: Tuesday, February 14, 2017 12:18:11 PM
> Subject: Re:   [PATCH 0/8] IB/srp bug fixes
> 
> [...]
> Thank you for the testing. However, reverting commit ad8e66b4a801 without
> making any further changes is not acceptable because it would reintroduce
> the SG-list mapping problem addressed by that patch. Can you test the
> srp-initiator-for-next branch from my github repository against mlx5 (commit
> 8dca762deab6)? It passes my tests against mlx4.
> [...]
>
> Bart.

Hello Bart

4.10.0-rc8.bart_latest+

Built from branch srp-initiator-for-next after pulling your repository.

I focused on the large I/O testing, but all tests are passing: small/large I/O, direct and buffered I/O, file-system, and direct to mpath devices.

This is a snap of 4 simultaneous 4MB I/O read tasks and 1 buffered write task (that will sporadically exceed 4MB); a rough reproduction sketch follows the table.

### RECORD    7 >>> ibclient <<< (1487097890.001) (Tue Feb 14 13:44:50 2017) ###
# DISK STATISTICS (/sec)
#                   <---------reads---------------><---------writes--------------><--------averages--------> Pct
#Time     Name       KBytes Merged  IOs Size  Wait  KBytes Merged  IOs Size  Wait  RWSize  QLen  Wait SvcTim Util
13:44:50 dm-11       192512    141   47 4096    20       0      0    0    0     0    4096     1    20     21   99
13:44:50 dm-17       184320    135   45 4096    20       0      0    0    0     0    4096     1    20     22   99
13:44:50 dm-21       163840    120   40 4096    21  1236928   1984  153 8084   319    7257    91   257      5   99
13:44:50 dm-24       786432    576  192 4096     5       0      0    0    0     0    4096     1     5      5   99
13:44:50 dm-30       790528    579  193 4096     5       0      0    0    0     0    4096     1     5      5   99
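
The reproduction sketch mentioned above (device names are hypothetical; the
real runs used the multipath devices listed in the table):

  # four 4 MiB direct-I/O readers plus one buffered writer
  for dev in mpatha mpathb mpathc mpathd; do
      dd if=/dev/mapper/$dev of=/dev/null bs=4M iflag=direct &
  done
  dd if=/dev/zero of=/dev/mapper/mpathe bs=4M count=25600 &
  wait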

It looks good, Bart.

For branch srp-initiator-for-next, all tests are passing.
Tested-by:     Laurence Oberman <loberman@redhat.com>

Thanks
Laurence
Bart Van Assche Feb. 14, 2017, 6:49 p.m. UTC | #20
On Tue, 2017-02-14 at 13:47 -0500, Laurence Oberman wrote:
> For branch srp-initiator-for-next, all tests are passing.
> Tested-by:     Laurence Oberman <loberman@redhat.com>

Thank you! I will post these patches as a v2 of this series.

Bart.
diff mbox

Patch

diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 2f85255d2aca..b50733910f7e 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -471,9 +471,13 @@  static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
  * completion handler can access the queue pair while it is
  * being destroyed.
  */
-static void srp_destroy_qp(struct ib_qp *qp)
+static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
 {
-	ib_drain_rq(qp);
+	spin_lock_irq(&ch->lock);
+	ib_process_cq_direct(ch->send_cq, -1);
+	spin_unlock_irq(&ch->lock);
+
+	ib_drain_qp(qp);
 	ib_destroy_qp(qp);
 }
 
@@ -547,7 +551,7 @@  static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	}
 
 	if (ch->qp)
-		srp_destroy_qp(ch->qp);
+		srp_destroy_qp(ch, ch->qp);
 	if (ch->recv_cq)
 		ib_free_cq(ch->recv_cq);
 	if (ch->send_cq)
@@ -571,7 +575,7 @@  static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	return 0;
 
 err_qp:
-	srp_destroy_qp(qp);
+	srp_destroy_qp(ch, qp);
 
 err_send_cq:
 	ib_free_cq(send_cq);
@@ -614,7 +618,7 @@  static void srp_free_ch_ib(struct srp_target_port *target,
 			ib_destroy_fmr_pool(ch->fmr_pool);
 	}
 
-	srp_destroy_qp(ch->qp);
+	srp_destroy_qp(ch, ch->qp);
 	ib_free_cq(ch->send_cq);
 	ib_free_cq(ch->recv_cq);
 
@@ -1827,6 +1831,11 @@  static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
 	return iu;
 }
 
+/*
+ * Note: if this function is called from inside ib_drain_sq() then it will
+ * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
+ * with status IB_WC_SUCCESS then that's a bug.
+ */
 static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);