ksmbd: set the rdma capability of network adapter using ib_client

Message ID 20211222224306.198076-1-hyc.lee@gmail.com (mailing list archive)
State New, archived
Series ksmbd: set the rdma capability of network adapter using ib_client

Commit Message

Hyunchul Lee Dec. 22, 2021, 10:43 p.m. UTC
Set the rdma capability using ib_client, because for some
devices, ib_device_get_by_netdev() returns NULL.

Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
---
 fs/ksmbd/transport_rdma.c | 89 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 83 insertions(+), 6 deletions(-)
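
For readers new to the approach, the patch leans on the kernel's ib_client interface: a consumer registers add/remove callbacks with ib_register_client() and is then notified about every RDMA device that appears or disappears, instead of looking devices up on demand with ib_device_get_by_netdev(). Below is a minimal, hedged sketch of that pattern; the example_* names are invented for illustration and are not part of this patch.

#include <linux/module.h>
#include <rdma/ib_verbs.h>

/* Called for every RDMA device, including devices that already exist
 * when the client is registered. Returning an error rejects the device.
 */
static int example_ib_add(struct ib_device *ib_dev)
{
	pr_info("example: ib device %s added\n", ib_dev->name);
	return 0;
}

/* Called when a device is unregistered, e.g. on hot-unplug. */
static void example_ib_remove(struct ib_device *ib_dev, void *client_data)
{
	pr_info("example: ib device %s removed\n", ib_dev->name);
}

static struct ib_client example_ib_client = {
	.name	= "example_client",
	.add	= example_ib_add,
	.remove	= example_ib_remove,
};

static int __init example_init(void)
{
	return ib_register_client(&example_ib_client);
}

static void __exit example_exit(void)
{
	ib_unregister_client(&example_ib_client);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");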

Comments

Namjae Jeon Dec. 24, 2021, 7:27 a.m. UTC | #1
2021-12-23 7:43 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
> Set the rdma capability using ib_clien, Because
> For some devices, ib_device_get_by_netdev() returns NULL.
>
> Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
> ---
>  fs/ksmbd/transport_rdma.c | 89 ++++++++++++++++++++++++++++++++++++---
>  1 file changed, 83 insertions(+), 6 deletions(-)
>
> diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
> index 7e57cbb0bb35..0820c6a9d75e 100644
> --- a/fs/ksmbd/transport_rdma.c
> +++ b/fs/ksmbd/transport_rdma.c
> @@ -79,6 +79,14 @@ static int smb_direct_max_read_write_size = 1024 * 1024;
>
>  static int smb_direct_max_outstanding_rw_ops = 8;
>
> +static LIST_HEAD(smb_direct_device_list);
> +static DEFINE_RWLOCK(smb_direct_device_lock);
> +
> +struct smb_direct_device {
> +	struct ib_device	*ib_dev;
> +	struct list_head	list;
> +};
> +
>  static struct smb_direct_listener {
>  	struct rdma_cm_id	*cm_id;
>  } smb_direct_listener;
> @@ -2007,12 +2015,62 @@ static int smb_direct_listen(int port)
>  	return ret;
>  }
>
> +static int smb_direct_ib_client_add(struct ib_device *ib_dev)
> +{
> +	struct smb_direct_device *smb_dev;
> +
> +	if (!ib_dev->ops.get_netdev ||
> +	    ib_dev->node_type != RDMA_NODE_IB_CA ||
> +	    !rdma_frwr_is_supported(&ib_dev->attrs))
> +		return 0;
> +
> +	smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
> +	if (!smb_dev)
> +		return -ENOMEM;
> +	smb_dev->ib_dev = ib_dev;
> +
> +	write_lock(&smb_direct_device_lock);
> +	list_add(&smb_dev->list, &smb_direct_device_list);
> +	write_unlock(&smb_direct_device_lock);
> +
> +	ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
> +	return 0;
> +}
> +
> +static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
> +					void *client_data)
> +{
When is this function called? Is rdma shutdown needed here?

> +	struct smb_direct_device *smb_dev, *tmp;
> +
> +	write_lock(&smb_direct_device_lock);
> +	list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
> +		if (smb_dev->ib_dev == ib_dev) {
> +			list_del(&smb_dev->list);
> +			kfree(smb_dev);
> +			break;
> +		}
> +	}
> +	write_unlock(&smb_direct_device_lock);
> +}
> +
> +static struct ib_client smb_direct_ib_client = {
> +	.name	= "ksmbd_smb_direct_ib",
Move smb_direct_ib_client_add and smb_direct_ib_client_remove to here.

> +};
> +
>  int ksmbd_rdma_init(void)
>  {
>  	int ret;
>
>  	smb_direct_listener.cm_id = NULL;
>
> +	smb_direct_ib_client.add = smb_direct_ib_client_add;
> +	smb_direct_ib_client.remove = smb_direct_ib_client_remove;
> +	ret = ib_register_client(&smb_direct_ib_client);
> +	if (ret) {
> +		pr_err("failed to ib_register_client\n");
> +		return ret;
> +	}
> +
>  	/* When a client is running out of send credits, the credits are
>  	 * granted by the server's sending a packet using this queue.
>  	 * This avoids the situation that a clients cannot send packets
> @@ -2046,20 +2104,39 @@ int ksmbd_rdma_destroy(void)
>  		destroy_workqueue(smb_direct_wq);
>  		smb_direct_wq = NULL;
>  	}
> +
> +	if (smb_direct_ib_client.add)
> +		ib_unregister_client(&smb_direct_ib_client);
> +	smb_direct_ib_client.add = NULL;
Why?

>  	return 0;
>  }
>
>  bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
>  {
> -	struct ib_device *ibdev;
> +	struct smb_direct_device *smb_dev;
> +	int i;
>  	bool rdma_capable = false;
>
> -	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
> -	if (ibdev) {
> -		if (rdma_frwr_is_supported(&ibdev->attrs))
> -			rdma_capable = true;
> -		ib_device_put(ibdev);
> +	read_lock(&smb_direct_device_lock);
> +	list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
> +		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
> +			struct net_device *ndev;
> +
> +			ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
> +							       i + 1);
> +			if (!ndev)
> +				continue;
> +
> +			if (ndev == netdev) {
> +				dev_put(ndev);
> +				rdma_capable = true;
> +				goto out;
> +			}
> +			dev_put(ndev);
> +		}
>  	}
> +out:
> +	read_unlock(&smb_direct_device_lock);
>  	return rdma_capable;
>  }
>
> --
> 2.25.1
>
>
Hyunchul Lee Dec. 24, 2021, 10:53 p.m. UTC | #2
On Fri, Dec 24, 2021 at 4:27 PM Namjae Jeon <linkinjeon@kernel.org> wrote:
>
> 2021-12-23 7:43 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
> > Set the rdma capability using ib_clien, Because
> > For some devices, ib_device_get_by_netdev() returns NULL.
> >
> > Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
> > ---
> >  fs/ksmbd/transport_rdma.c | 89 ++++++++++++++++++++++++++++++++++++---
> >  1 file changed, 83 insertions(+), 6 deletions(-)
> >
> > diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
> > index 7e57cbb0bb35..0820c6a9d75e 100644
> > --- a/fs/ksmbd/transport_rdma.c
> > +++ b/fs/ksmbd/transport_rdma.c
> > @@ -79,6 +79,14 @@ static int smb_direct_max_read_write_size = 1024 * 1024;
> >
> >  static int smb_direct_max_outstanding_rw_ops = 8;
> >
> > +static LIST_HEAD(smb_direct_device_list);
> > +static DEFINE_RWLOCK(smb_direct_device_lock);
> > +
> > +struct smb_direct_device {
> > +     struct ib_device        *ib_dev;
> > +     struct list_head        list;
> > +};
> > +
> >  static struct smb_direct_listener {
> >       struct rdma_cm_id       *cm_id;
> >  } smb_direct_listener;
> > @@ -2007,12 +2015,62 @@ static int smb_direct_listen(int port)
> >       return ret;
> >  }
> >
> > +static int smb_direct_ib_client_add(struct ib_device *ib_dev)
> > +{
> > +     struct smb_direct_device *smb_dev;
> > +
> > +     if (!ib_dev->ops.get_netdev ||
> > +         ib_dev->node_type != RDMA_NODE_IB_CA ||
> > +         !rdma_frwr_is_supported(&ib_dev->attrs))
> > +             return 0;
> > +
> > +     smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
> > +     if (!smb_dev)
> > +             return -ENOMEM;
> > +     smb_dev->ib_dev = ib_dev;
> > +
> > +     write_lock(&smb_direct_device_lock);
> > +     list_add(&smb_dev->list, &smb_direct_device_list);
> > +     write_unlock(&smb_direct_device_lock);
> > +
> > +     ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
> > +     return 0;
> > +}
> > +
> > +static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
> > +                                     void *client_data)
> > +{
> When is this function called? Is rdma shutdown needed here?
>

This is called when a device is unplugged. After this call, an
RDMA_CM_EVENT_DEVICE_REMOVAL event is generated, and ksmbd
handles this event.
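
For context, here is a hedged sketch of how an rdma_cm event handler typically reacts to that event; it is a simplified illustration, not ksmbd's actual handler, and example_teardown_connection() is a hypothetical helper.

#include <rdma/rdma_cm.h>

/* Hypothetical helper standing in for the transport's own teardown path. */
static void example_teardown_connection(void *conn)
{
}

static int example_cm_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/* The ib_device backing this connection is going away
		 * (for example, the NIC was unplugged), so tear the
		 * connection down and let the cm_id be destroyed.
		 */
		example_teardown_connection(cm_id->context);
		break;
	default:
		break;
	}
	return 0;
}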

> > +     struct smb_direct_device *smb_dev, *tmp;
> > +
> > +     write_lock(&smb_direct_device_lock);
> > +     list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
> > +             if (smb_dev->ib_dev == ib_dev) {
> > +                     list_del(&smb_dev->list);
> > +                     kfree(smb_dev);
> > +                     break;
> > +             }
> > +     }
> > +     write_unlock(&smb_direct_device_lock);
> > +}
> > +
> > +static struct ib_client smb_direct_ib_client = {
> > +     .name   = "ksmbd_smb_direct_ib",
> Move smb_direct_ib_client_add and smb_direct_ib_client_remove to here.
>
> > +};
> > +
> >  int ksmbd_rdma_init(void)
> >  {
> >       int ret;
> >
> >       smb_direct_listener.cm_id = NULL;
> >
> > +     smb_direct_ib_client.add = smb_direct_ib_client_add;
> > +     smb_direct_ib_client.remove = smb_direct_ib_client_remove;
> > +     ret = ib_register_client(&smb_direct_ib_client);
> > +     if (ret) {
> > +             pr_err("failed to ib_register_client\n");
> > +             return ret;
> > +     }
> > +
> >       /* When a client is running out of send credits, the credits are
> >        * granted by the server's sending a packet using this queue.
> >        * This avoids the situation that a clients cannot send packets
> > @@ -2046,20 +2104,39 @@ int ksmbd_rdma_destroy(void)
> >               destroy_workqueue(smb_direct_wq);
> >               smb_direct_wq = NULL;
> >       }
> > +
> > +     if (smb_direct_ib_client.add)
> > +             ib_unregister_client(&smb_direct_ib_client);
> > +     smb_direct_ib_client.add = NULL;
> Why ?
>

To avoid multiple calls of ib_unregister_client(), because
ksmbd_rdma_destroy() can be called without ksmbd_rdma_init().
I will clean up the two functions and remove these statements.
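
As a rough illustration of the cleanup described above, one way to make the unregister path idempotent is a registration flag; the sketch below assumes such a flag (the flag name is made up and this is not necessarily what the v2 patch does).

/* Sketch only: guard (un)registration so ksmbd_rdma_destroy() stays
 * harmless even when ksmbd_rdma_init() never ran or failed early.
 */
static bool smb_direct_ib_client_registered;

int ksmbd_rdma_init(void)
{
	int ret;

	ret = ib_register_client(&smb_direct_ib_client);
	if (ret) {
		pr_err("failed to ib_register_client\n");
		return ret;
	}
	smb_direct_ib_client_registered = true;
	/* ... rest of the init path ... */
	return 0;
}

int ksmbd_rdma_destroy(void)
{
	/* ... rest of the teardown path ... */
	if (smb_direct_ib_client_registered) {
		ib_unregister_client(&smb_direct_ib_client);
		smb_direct_ib_client_registered = false;
	}
	return 0;
}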

> >       return 0;
> >  }
> >
> >  bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
> >  {
> > -     struct ib_device *ibdev;
> > +     struct smb_direct_device *smb_dev;
> > +     int i;
> >       bool rdma_capable = false;
> >
> > -     ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
> > -     if (ibdev) {
> > -             if (rdma_frwr_is_supported(&ibdev->attrs))
> > -                     rdma_capable = true;
> > -             ib_device_put(ibdev);
> > +     read_lock(&smb_direct_device_lock);
> > +     list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
> > +             for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
> > +                     struct net_device *ndev;
> > +
> > +                     ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
> > +                                                            i + 1);
> > +                     if (!ndev)
> > +                             continue;
> > +
> > +                     if (ndev == netdev) {
> > +                             dev_put(ndev);
> > +                             rdma_capable = true;
> > +                             goto out;
> > +                     }
> > +                     dev_put(ndev);
> > +             }
> >       }
> > +out:
> > +     read_unlock(&smb_direct_device_lock);
> >       return rdma_capable;
> >  }
> >
> > --
> > 2.25.1
> >
> >
Namjae Jeon Dec. 24, 2021, 11:04 p.m. UTC | #3
2021-12-25 7:53 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
> On Fri, Dec 24, 2021 at 4:27 PM Namjae Jeon <linkinjeon@kernel.org> wrote:
>>
>> 2021-12-23 7:43 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
>> > Set the rdma capability using ib_clien, Because
>> > For some devices, ib_device_get_by_netdev() returns NULL.
>> >
>> > Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
>> > ---
>> >  fs/ksmbd/transport_rdma.c | 89 ++++++++++++++++++++++++++++++++++++---
>> >  1 file changed, 83 insertions(+), 6 deletions(-)
>> >
>> > diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
>> > index 7e57cbb0bb35..0820c6a9d75e 100644
>> > --- a/fs/ksmbd/transport_rdma.c
>> > +++ b/fs/ksmbd/transport_rdma.c
>> > @@ -79,6 +79,14 @@ static int smb_direct_max_read_write_size = 1024 *
>> > 1024;
>> >
>> >  static int smb_direct_max_outstanding_rw_ops = 8;
>> >
>> > +static LIST_HEAD(smb_direct_device_list);
>> > +static DEFINE_RWLOCK(smb_direct_device_lock);
>> > +
>> > +struct smb_direct_device {
>> > +     struct ib_device        *ib_dev;
>> > +     struct list_head        list;
>> > +};
>> > +
>> >  static struct smb_direct_listener {
>> >       struct rdma_cm_id       *cm_id;
>> >  } smb_direct_listener;
>> > @@ -2007,12 +2015,62 @@ static int smb_direct_listen(int port)
>> >       return ret;
>> >  }
>> >
>> > +static int smb_direct_ib_client_add(struct ib_device *ib_dev)
>> > +{
>> > +     struct smb_direct_device *smb_dev;
>> > +
>> > +     if (!ib_dev->ops.get_netdev ||
>> > +         ib_dev->node_type != RDMA_NODE_IB_CA ||
>> > +         !rdma_frwr_is_supported(&ib_dev->attrs))
>> > +             return 0;
>> > +
>> > +     smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
>> > +     if (!smb_dev)
>> > +             return -ENOMEM;
>> > +     smb_dev->ib_dev = ib_dev;
>> > +
>> > +     write_lock(&smb_direct_device_lock);
>> > +     list_add(&smb_dev->list, &smb_direct_device_list);
>> > +     write_unlock(&smb_direct_device_lock);
>> > +
>> > +     ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
>> > +     return 0;
>> > +}
>> > +
>> > +static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
>> > +                                     void *client_data)
>> > +{
>> When is this function called? Is rdma shutdown needed here?
>>
>
> This is called when a device is unplugged. After this call,
> RDMA_CM_EVENT_DEVICE_REMOVAL event is generated.
> And ksmbd handles this event.
Okay, have you actually tested it while unplugging NICs?
Also, this patch doesn't work on Chelsio NICs (iWARP).
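
One plausible reason (an assumption, not stated in the thread): iWARP RNICs such as Chelsio adapters have node_type RDMA_NODE_RNIC, so the RDMA_NODE_IB_CA check in smb_direct_ib_client_add() would skip them. A hedged sketch of a relaxed filter, not the code that was eventually merged:

/* Sketch only: also accept iWARP RNICs when deciding whether to track
 * a device. This is an assumption about a possible fix.
 */
static int smb_direct_ib_client_add(struct ib_device *ib_dev)
{
	if (ib_dev->node_type != RDMA_NODE_IB_CA &&
	    ib_dev->node_type != RDMA_NODE_RNIC)
		return 0;

	if (!rdma_frwr_is_supported(&ib_dev->attrs))
		return 0;

	/* ... allocate and list the smb_direct_device as in the patch ... */
	return 0;
}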

>
>> > +     struct smb_direct_device *smb_dev, *tmp;
>> > +
>> > +     write_lock(&smb_direct_device_lock);
>> > +     list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list,
>> > list) {
>> > +             if (smb_dev->ib_dev == ib_dev) {
>> > +                     list_del(&smb_dev->list);
>> > +                     kfree(smb_dev);
>> > +                     break;
>> > +             }
>> > +     }
>> > +     write_unlock(&smb_direct_device_lock);
>> > +}
>> > +
>> > +static struct ib_client smb_direct_ib_client = {
>> > +     .name   = "ksmbd_smb_direct_ib",
>> Move smb_direct_ib_client_add and smb_direct_ib_client_remove to here.
>>
>> > +};
>> > +
>> >  int ksmbd_rdma_init(void)
>> >  {
>> >       int ret;
>> >
>> >       smb_direct_listener.cm_id = NULL;
>> >
>> > +     smb_direct_ib_client.add = smb_direct_ib_client_add;
>> > +     smb_direct_ib_client.remove = smb_direct_ib_client_remove;
>> > +     ret = ib_register_client(&smb_direct_ib_client);
>> > +     if (ret) {
>> > +             pr_err("failed to ib_register_client\n");
>> > +             return ret;
>> > +     }
>> > +
>> >       /* When a client is running out of send credits, the credits are
>> >        * granted by the server's sending a packet using this queue.
>> >        * This avoids the situation that a clients cannot send packets
>> > @@ -2046,20 +2104,39 @@ int ksmbd_rdma_destroy(void)
>> >               destroy_workqueue(smb_direct_wq);
>> >               smb_direct_wq = NULL;
>> >       }
>> > +
>> > +     if (smb_direct_ib_client.add)
>> > +             ib_unregister_client(&smb_direct_ib_client);
>> > +     smb_direct_ib_client.add = NULL;
>> Why ?
>>
>
> To avoid multiple calls of ib_unregister_client, because
> ksmbd_rdma_destory() can be called without ksmbd_rdma_init().
> I will clean two functions and remove these statements.
>
>> >       return 0;
>> >  }
>> >
>> >  bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
>> >  {
>> > -     struct ib_device *ibdev;
>> > +     struct smb_direct_device *smb_dev;
>> > +     int i;
>> >       bool rdma_capable = false;
>> >
>> > -     ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
>> > -     if (ibdev) {
>> > -             if (rdma_frwr_is_supported(&ibdev->attrs))
>> > -                     rdma_capable = true;
>> > -             ib_device_put(ibdev);
>> > +     read_lock(&smb_direct_device_lock);
>> > +     list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
>> > +             for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
>> > +                     struct net_device *ndev;
>> > +
>> > +                     ndev =
>> > smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
>> > +                                                            i + 1);
>> > +                     if (!ndev)
>> > +                             continue;
>> > +
>> > +                     if (ndev == netdev) {
>> > +                             dev_put(ndev);
>> > +                             rdma_capable = true;
>> > +                             goto out;
>> > +                     }
>> > +                     dev_put(ndev);
>> > +             }
>> >       }
>> > +out:
>> > +     read_unlock(&smb_direct_device_lock);
>> >       return rdma_capable;
>> >  }
>> >
>> > --
>> > 2.25.1
>> >
>> >
>
>
>
> --
> Thanks,
> Hyunchul
>
Hyunchul Lee Dec. 24, 2021, 11:38 p.m. UTC | #4
On Sat, Dec 25, 2021 at 8:05 AM Namjae Jeon <linkinjeon@kernel.org> wrote:
>
> 2021-12-25 7:53 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
> > On Fri, Dec 24, 2021 at 4:27 PM Namjae Jeon <linkinjeon@kernel.org> wrote:
> >>
> >> 2021-12-23 7:43 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
> >> > Set the rdma capability using ib_clien, Because
> >> > For some devices, ib_device_get_by_netdev() returns NULL.
> >> >
> >> > Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
> >> > ---
> >> >  fs/ksmbd/transport_rdma.c | 89 ++++++++++++++++++++++++++++++++++++---
> >> >  1 file changed, 83 insertions(+), 6 deletions(-)
> >> >
> >> > diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
> >> > index 7e57cbb0bb35..0820c6a9d75e 100644
> >> > --- a/fs/ksmbd/transport_rdma.c
> >> > +++ b/fs/ksmbd/transport_rdma.c
> >> > @@ -79,6 +79,14 @@ static int smb_direct_max_read_write_size = 1024 *
> >> > 1024;
> >> >
> >> >  static int smb_direct_max_outstanding_rw_ops = 8;
> >> >
> >> > +static LIST_HEAD(smb_direct_device_list);
> >> > +static DEFINE_RWLOCK(smb_direct_device_lock);
> >> > +
> >> > +struct smb_direct_device {
> >> > +     struct ib_device        *ib_dev;
> >> > +     struct list_head        list;
> >> > +};
> >> > +
> >> >  static struct smb_direct_listener {
> >> >       struct rdma_cm_id       *cm_id;
> >> >  } smb_direct_listener;
> >> > @@ -2007,12 +2015,62 @@ static int smb_direct_listen(int port)
> >> >       return ret;
> >> >  }
> >> >
> >> > +static int smb_direct_ib_client_add(struct ib_device *ib_dev)
> >> > +{
> >> > +     struct smb_direct_device *smb_dev;
> >> > +
> >> > +     if (!ib_dev->ops.get_netdev ||
> >> > +         ib_dev->node_type != RDMA_NODE_IB_CA ||
> >> > +         !rdma_frwr_is_supported(&ib_dev->attrs))
> >> > +             return 0;
> >> > +
> >> > +     smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
> >> > +     if (!smb_dev)
> >> > +             return -ENOMEM;
> >> > +     smb_dev->ib_dev = ib_dev;
> >> > +
> >> > +     write_lock(&smb_direct_device_lock);
> >> > +     list_add(&smb_dev->list, &smb_direct_device_list);
> >> > +     write_unlock(&smb_direct_device_lock);
> >> > +
> >> > +     ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
> >> > +     return 0;
> >> > +}
> >> > +
> >> > +static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
> >> > +                                     void *client_data)
> >> > +{
> >> When is this function called? Is rdma shutdown needed here?
> >>
> >
> > This is called when a device is unplugged. After this call,
> > RDMA_CM_EVENT_DEVICE_REMOVAL event is generated.
> > And ksmbd handles this event.
> Okay, Have you actually tested it while unplugging NICs?
> And This patch doesn't work on Chelsio NICs(iWARP).
>

Not yet. I will test it, but I need some time to set up
my test environment.

Is the rdma capability set on Chelsio NICs without
this patch?

> >
> >> > +     struct smb_direct_device *smb_dev, *tmp;
> >> > +
> >> > +     write_lock(&smb_direct_device_lock);
> >> > +     list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list,
> >> > list) {
> >> > +             if (smb_dev->ib_dev == ib_dev) {
> >> > +                     list_del(&smb_dev->list);
> >> > +                     kfree(smb_dev);
> >> > +                     break;
> >> > +             }
> >> > +     }
> >> > +     write_unlock(&smb_direct_device_lock);
> >> > +}
> >> > +
> >> > +static struct ib_client smb_direct_ib_client = {
> >> > +     .name   = "ksmbd_smb_direct_ib",
> >> Move smb_direct_ib_client_add and smb_direct_ib_client_remove to here.
> >>
> >> > +};
> >> > +
> >> >  int ksmbd_rdma_init(void)
> >> >  {
> >> >       int ret;
> >> >
> >> >       smb_direct_listener.cm_id = NULL;
> >> >
> >> > +     smb_direct_ib_client.add = smb_direct_ib_client_add;
> >> > +     smb_direct_ib_client.remove = smb_direct_ib_client_remove;
> >> > +     ret = ib_register_client(&smb_direct_ib_client);
> >> > +     if (ret) {
> >> > +             pr_err("failed to ib_register_client\n");
> >> > +             return ret;
> >> > +     }
> >> > +
> >> >       /* When a client is running out of send credits, the credits are
> >> >        * granted by the server's sending a packet using this queue.
> >> >        * This avoids the situation that a clients cannot send packets
> >> > @@ -2046,20 +2104,39 @@ int ksmbd_rdma_destroy(void)
> >> >               destroy_workqueue(smb_direct_wq);
> >> >               smb_direct_wq = NULL;
> >> >       }
> >> > +
> >> > +     if (smb_direct_ib_client.add)
> >> > +             ib_unregister_client(&smb_direct_ib_client);
> >> > +     smb_direct_ib_client.add = NULL;
> >> Why ?
> >>
> >
> > To avoid multiple calls of ib_unregister_client, because
> > ksmbd_rdma_destory() can be called without ksmbd_rdma_init().
> > I will clean two functions and remove these statements.
> >
> >> >       return 0;
> >> >  }
> >> >
> >> >  bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
> >> >  {
> >> > -     struct ib_device *ibdev;
> >> > +     struct smb_direct_device *smb_dev;
> >> > +     int i;
> >> >       bool rdma_capable = false;
> >> >
> >> > -     ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
> >> > -     if (ibdev) {
> >> > -             if (rdma_frwr_is_supported(&ibdev->attrs))
> >> > -                     rdma_capable = true;
> >> > -             ib_device_put(ibdev);
> >> > +     read_lock(&smb_direct_device_lock);
> >> > +     list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
> >> > +             for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
> >> > +                     struct net_device *ndev;
> >> > +
> >> > +                     ndev =
> >> > smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
> >> > +                                                            i + 1);
> >> > +                     if (!ndev)
> >> > +                             continue;
> >> > +
> >> > +                     if (ndev == netdev) {
> >> > +                             dev_put(ndev);
> >> > +                             rdma_capable = true;
> >> > +                             goto out;
> >> > +                     }
> >> > +                     dev_put(ndev);
> >> > +             }
> >> >       }
> >> > +out:
> >> > +     read_unlock(&smb_direct_device_lock);
> >> >       return rdma_capable;
> >> >  }
> >> >
> >> > --
> >> > 2.25.1
> >> >
> >> >
> >
> >
> >
> > --
> > Thanks,
> > Hyunchul
> >
Namjae Jeon Dec. 25, 2021, 4:24 a.m. UTC | #5
2021-12-25 8:38 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
> On Sat, Dec 25, 2021 at 8:05 AM Namjae Jeon <linkinjeon@kernel.org> wrote:
>>
>> 2021-12-25 7:53 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
>> > On Fri, Dec 24, 2021 at 4:27 PM Namjae Jeon <linkinjeon@kernel.org> wrote:
>> >>
>> >> 2021-12-23 7:43 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
>> >> > Set the rdma capability using ib_clien, Because
>> >> > For some devices, ib_device_get_by_netdev() returns NULL.
>> >> >
>> >> > Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
>> >> > ---
>> >> >  fs/ksmbd/transport_rdma.c | 89
>> >> > ++++++++++++++++++++++++++++++++++++---
>> >> >  1 file changed, 83 insertions(+), 6 deletions(-)
>> >> >
>> >> > diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
>> >> > index 7e57cbb0bb35..0820c6a9d75e 100644
>> >> > --- a/fs/ksmbd/transport_rdma.c
>> >> > +++ b/fs/ksmbd/transport_rdma.c
>> >> > @@ -79,6 +79,14 @@ static int smb_direct_max_read_write_size = 1024
>> >> > *
>> >> > 1024;
>> >> >
>> >> >  static int smb_direct_max_outstanding_rw_ops = 8;
>> >> >
>> >> > +static LIST_HEAD(smb_direct_device_list);
>> >> > +static DEFINE_RWLOCK(smb_direct_device_lock);
>> >> > +
>> >> > +struct smb_direct_device {
>> >> > +     struct ib_device        *ib_dev;
>> >> > +     struct list_head        list;
>> >> > +};
>> >> > +
>> >> >  static struct smb_direct_listener {
>> >> >       struct rdma_cm_id       *cm_id;
>> >> >  } smb_direct_listener;
>> >> > @@ -2007,12 +2015,62 @@ static int smb_direct_listen(int port)
>> >> >       return ret;
>> >> >  }
>> >> >
>> >> > +static int smb_direct_ib_client_add(struct ib_device *ib_dev)
>> >> > +{
>> >> > +     struct smb_direct_device *smb_dev;
>> >> > +
>> >> > +     if (!ib_dev->ops.get_netdev ||
>> >> > +         ib_dev->node_type != RDMA_NODE_IB_CA ||
>> >> > +         !rdma_frwr_is_supported(&ib_dev->attrs))
>> >> > +             return 0;
>> >> > +
>> >> > +     smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
>> >> > +     if (!smb_dev)
>> >> > +             return -ENOMEM;
>> >> > +     smb_dev->ib_dev = ib_dev;
>> >> > +
>> >> > +     write_lock(&smb_direct_device_lock);
>> >> > +     list_add(&smb_dev->list, &smb_direct_device_list);
>> >> > +     write_unlock(&smb_direct_device_lock);
>> >> > +
>> >> > +     ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
>> >> > +     return 0;
>> >> > +}
>> >> > +
>> >> > +static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
>> >> > +                                     void *client_data)
>> >> > +{
>> >> When is this function called? Is rdma shutdown needed here?
>> >>
>> >
>> > This is called when a device is unplugged. After this call,
>> > RDMA_CM_EVENT_DEVICE_REMOVAL event is generated.
>> > And ksmbd handles this event.
>> Okay, Have you actually tested it while unplugging NICs?
>> And This patch doesn't work on Chelsio NICs(iWARP).
>>
>
> Not yet. I will test it but need some time because of
> setting up my test environment.
Don't send untested patches to the list.
>
> Is the rdma capability set on Chelsio NICs without
> this patch?
Yes.
>
>> >
>> >> > +     struct smb_direct_device *smb_dev, *tmp;
>> >> > +
>> >> > +     write_lock(&smb_direct_device_lock);
>> >> > +     list_for_each_entry_safe(smb_dev, tmp,
>> >> > &smb_direct_device_list,
>> >> > list) {
>> >> > +             if (smb_dev->ib_dev == ib_dev) {
>> >> > +                     list_del(&smb_dev->list);
>> >> > +                     kfree(smb_dev);
>> >> > +                     break;
>> >> > +             }
>> >> > +     }
>> >> > +     write_unlock(&smb_direct_device_lock);
>> >> > +}
>> >> > +
>> >> > +static struct ib_client smb_direct_ib_client = {
>> >> > +     .name   = "ksmbd_smb_direct_ib",
>> >> Move smb_direct_ib_client_add and smb_direct_ib_client_remove to here.
>> >>
>> >> > +};
>> >> > +
>> >> >  int ksmbd_rdma_init(void)
>> >> >  {
>> >> >       int ret;
>> >> >
>> >> >       smb_direct_listener.cm_id = NULL;
>> >> >
>> >> > +     smb_direct_ib_client.add = smb_direct_ib_client_add;
>> >> > +     smb_direct_ib_client.remove = smb_direct_ib_client_remove;
>> >> > +     ret = ib_register_client(&smb_direct_ib_client);
>> >> > +     if (ret) {
>> >> > +             pr_err("failed to ib_register_client\n");
>> >> > +             return ret;
>> >> > +     }
>> >> > +
>> >> >       /* When a client is running out of send credits, the credits
>> >> > are
>> >> >        * granted by the server's sending a packet using this queue.
>> >> >        * This avoids the situation that a clients cannot send
>> >> > packets
>> >> > @@ -2046,20 +2104,39 @@ int ksmbd_rdma_destroy(void)
>> >> >               destroy_workqueue(smb_direct_wq);
>> >> >               smb_direct_wq = NULL;
>> >> >       }
>> >> > +
>> >> > +     if (smb_direct_ib_client.add)
>> >> > +             ib_unregister_client(&smb_direct_ib_client);
>> >> > +     smb_direct_ib_client.add = NULL;
>> >> Why ?
>> >>
>> >
>> > To avoid multiple calls of ib_unregister_client, because
>> > ksmbd_rdma_destory() can be called without ksmbd_rdma_init().
>> > I will clean two functions and remove these statements.
>> >
>> >> >       return 0;
>> >> >  }
>> >> >
>> >> >  bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
>> >> >  {
>> >> > -     struct ib_device *ibdev;
>> >> > +     struct smb_direct_device *smb_dev;
>> >> > +     int i;
>> >> >       bool rdma_capable = false;
>> >> >
>> >> > -     ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
>> >> > -     if (ibdev) {
>> >> > -             if (rdma_frwr_is_supported(&ibdev->attrs))
>> >> > -                     rdma_capable = true;
>> >> > -             ib_device_put(ibdev);
>> >> > +     read_lock(&smb_direct_device_lock);
>> >> > +     list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
>> >> > +             for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
>> >> > +                     struct net_device *ndev;
>> >> > +
>> >> > +                     ndev =
>> >> > smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
>> >> > +                                                            i + 1);
>> >> > +                     if (!ndev)
>> >> > +                             continue;
>> >> > +
>> >> > +                     if (ndev == netdev) {
>> >> > +                             dev_put(ndev);
>> >> > +                             rdma_capable = true;
>> >> > +                             goto out;
>> >> > +                     }
>> >> > +                     dev_put(ndev);
>> >> > +             }
>> >> >       }
>> >> > +out:
>> >> > +     read_unlock(&smb_direct_device_lock);
>> >> >       return rdma_capable;
>> >> >  }
>> >> >
>> >> > --
>> >> > 2.25.1
>> >> >
>> >> >
>> >
>> >
>> >
>> > --
>> > Thanks,
>> > Hyunchul
>> >
>
>
>
> --
> Thanks,
> Hyunchul
>
Hyunchul Lee Dec. 25, 2021, 11:29 a.m. UTC | #6
On Sat, Dec 25, 2021 at 1:24 PM Namjae Jeon <linkinjeon@kernel.org> wrote:
>
> 2021-12-25 8:38 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
> > On Sat, Dec 25, 2021 at 8:05 AM Namjae Jeon <linkinjeon@kernel.org> wrote:
> >>
> >> 2021-12-25 7:53 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
> >> > On Fri, Dec 24, 2021 at 4:27 PM Namjae Jeon <linkinjeon@kernel.org> wrote:
> >> >>
> >> >> 2021-12-23 7:43 GMT+09:00, Hyunchul Lee <hyc.lee@gmail.com>:
> >> >> > Set the rdma capability using ib_clien, Because
> >> >> > For some devices, ib_device_get_by_netdev() returns NULL.
> >> >> >
> >> >> > Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
> >> >> > ---
> >> >> >  fs/ksmbd/transport_rdma.c | 89
> >> >> > ++++++++++++++++++++++++++++++++++++---
> >> >> >  1 file changed, 83 insertions(+), 6 deletions(-)
> >> >> >
> >> >> > diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
> >> >> > index 7e57cbb0bb35..0820c6a9d75e 100644
> >> >> > --- a/fs/ksmbd/transport_rdma.c
> >> >> > +++ b/fs/ksmbd/transport_rdma.c
> >> >> > @@ -79,6 +79,14 @@ static int smb_direct_max_read_write_size = 1024
> >> >> > *
> >> >> > 1024;
> >> >> >
> >> >> >  static int smb_direct_max_outstanding_rw_ops = 8;
> >> >> >
> >> >> > +static LIST_HEAD(smb_direct_device_list);
> >> >> > +static DEFINE_RWLOCK(smb_direct_device_lock);
> >> >> > +
> >> >> > +struct smb_direct_device {
> >> >> > +     struct ib_device        *ib_dev;
> >> >> > +     struct list_head        list;
> >> >> > +};
> >> >> > +
> >> >> >  static struct smb_direct_listener {
> >> >> >       struct rdma_cm_id       *cm_id;
> >> >> >  } smb_direct_listener;
> >> >> > @@ -2007,12 +2015,62 @@ static int smb_direct_listen(int port)
> >> >> >       return ret;
> >> >> >  }
> >> >> >
> >> >> > +static int smb_direct_ib_client_add(struct ib_device *ib_dev)
> >> >> > +{
> >> >> > +     struct smb_direct_device *smb_dev;
> >> >> > +
> >> >> > +     if (!ib_dev->ops.get_netdev ||
> >> >> > +         ib_dev->node_type != RDMA_NODE_IB_CA ||
> >> >> > +         !rdma_frwr_is_supported(&ib_dev->attrs))
> >> >> > +             return 0;
> >> >> > +
> >> >> > +     smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
> >> >> > +     if (!smb_dev)
> >> >> > +             return -ENOMEM;
> >> >> > +     smb_dev->ib_dev = ib_dev;
> >> >> > +
> >> >> > +     write_lock(&smb_direct_device_lock);
> >> >> > +     list_add(&smb_dev->list, &smb_direct_device_list);
> >> >> > +     write_unlock(&smb_direct_device_lock);
> >> >> > +
> >> >> > +     ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
> >> >> > +     return 0;
> >> >> > +}
> >> >> > +
> >> >> > +static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
> >> >> > +                                     void *client_data)
> >> >> > +{
> >> >> When is this function called? Is rdma shutdown needed here?
> >> >>
> >> >
> >> > This is called when a device is unplugged. After this call,
> >> > RDMA_CM_EVENT_DEVICE_REMOVAL event is generated.
> >> > And ksmbd handles this event.
> >> Okay, Have you actually tested it while unplugging NICs?
> >> And This patch doesn't work on Chelsio NICs(iWARP).
> >>
> >
> > Not yet. I will test it but need some time because of
> > setting up my test environment.
> Don't send untested patch to the list.
> >
> > Is the rdma capability set on Chelsio NICs without
> > this patch?
> Yes.

Okay, after testing device removal and fixing this,
I will send the v2 patch.

> >
> >> >
> >> >> > +     struct smb_direct_device *smb_dev, *tmp;
> >> >> > +
> >> >> > +     write_lock(&smb_direct_device_lock);
> >> >> > +     list_for_each_entry_safe(smb_dev, tmp,
> >> >> > &smb_direct_device_list,
> >> >> > list) {
> >> >> > +             if (smb_dev->ib_dev == ib_dev) {
> >> >> > +                     list_del(&smb_dev->list);
> >> >> > +                     kfree(smb_dev);
> >> >> > +                     break;
> >> >> > +             }
> >> >> > +     }
> >> >> > +     write_unlock(&smb_direct_device_lock);
> >> >> > +}
> >> >> > +
> >> >> > +static struct ib_client smb_direct_ib_client = {
> >> >> > +     .name   = "ksmbd_smb_direct_ib",
> >> >> Move smb_direct_ib_client_add and smb_direct_ib_client_remove to here.
> >> >>
> >> >> > +};
> >> >> > +
> >> >> >  int ksmbd_rdma_init(void)
> >> >> >  {
> >> >> >       int ret;
> >> >> >
> >> >> >       smb_direct_listener.cm_id = NULL;
> >> >> >
> >> >> > +     smb_direct_ib_client.add = smb_direct_ib_client_add;
> >> >> > +     smb_direct_ib_client.remove = smb_direct_ib_client_remove;
> >> >> > +     ret = ib_register_client(&smb_direct_ib_client);
> >> >> > +     if (ret) {
> >> >> > +             pr_err("failed to ib_register_client\n");
> >> >> > +             return ret;
> >> >> > +     }
> >> >> > +
> >> >> >       /* When a client is running out of send credits, the credits
> >> >> > are
> >> >> >        * granted by the server's sending a packet using this queue.
> >> >> >        * This avoids the situation that a clients cannot send
> >> >> > packets
> >> >> > @@ -2046,20 +2104,39 @@ int ksmbd_rdma_destroy(void)
> >> >> >               destroy_workqueue(smb_direct_wq);
> >> >> >               smb_direct_wq = NULL;
> >> >> >       }
> >> >> > +
> >> >> > +     if (smb_direct_ib_client.add)
> >> >> > +             ib_unregister_client(&smb_direct_ib_client);
> >> >> > +     smb_direct_ib_client.add = NULL;
> >> >> Why ?
> >> >>
> >> >
> >> > To avoid multiple calls of ib_unregister_client, because
> >> > ksmbd_rdma_destory() can be called without ksmbd_rdma_init().
> >> > I will clean two functions and remove these statements.
> >> >
> >> >> >       return 0;
> >> >> >  }
> >> >> >
> >> >> >  bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
> >> >> >  {
> >> >> > -     struct ib_device *ibdev;
> >> >> > +     struct smb_direct_device *smb_dev;
> >> >> > +     int i;
> >> >> >       bool rdma_capable = false;
> >> >> >
> >> >> > -     ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
> >> >> > -     if (ibdev) {
> >> >> > -             if (rdma_frwr_is_supported(&ibdev->attrs))
> >> >> > -                     rdma_capable = true;
> >> >> > -             ib_device_put(ibdev);
> >> >> > +     read_lock(&smb_direct_device_lock);
> >> >> > +     list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
> >> >> > +             for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
> >> >> > +                     struct net_device *ndev;
> >> >> > +
> >> >> > +                     ndev =
> >> >> > smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
> >> >> > +                                                            i + 1);
> >> >> > +                     if (!ndev)
> >> >> > +                             continue;
> >> >> > +
> >> >> > +                     if (ndev == netdev) {
> >> >> > +                             dev_put(ndev);
> >> >> > +                             rdma_capable = true;
> >> >> > +                             goto out;
> >> >> > +                     }
> >> >> > +                     dev_put(ndev);
> >> >> > +             }
> >> >> >       }
> >> >> > +out:
> >> >> > +     read_unlock(&smb_direct_device_lock);
> >> >> >       return rdma_capable;
> >> >> >  }
> >> >> >
> >> >> > --
> >> >> > 2.25.1
> >> >> >
> >> >> >
> >> >
> >> >
> >> >
> >> > --
> >> > Thanks,
> >> > Hyunchul
> >> >
> >
> >
> >
> > --
> > Thanks,
> > Hyunchul
> >

Patch

diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index 7e57cbb0bb35..0820c6a9d75e 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -79,6 +79,14 @@  static int smb_direct_max_read_write_size = 1024 * 1024;
 
 static int smb_direct_max_outstanding_rw_ops = 8;
 
+static LIST_HEAD(smb_direct_device_list);
+static DEFINE_RWLOCK(smb_direct_device_lock);
+
+struct smb_direct_device {
+	struct ib_device	*ib_dev;
+	struct list_head	list;
+};
+
 static struct smb_direct_listener {
 	struct rdma_cm_id	*cm_id;
 } smb_direct_listener;
@@ -2007,12 +2015,62 @@  static int smb_direct_listen(int port)
 	return ret;
 }
 
+static int smb_direct_ib_client_add(struct ib_device *ib_dev)
+{
+	struct smb_direct_device *smb_dev;
+
+	if (!ib_dev->ops.get_netdev ||
+	    ib_dev->node_type != RDMA_NODE_IB_CA ||
+	    !rdma_frwr_is_supported(&ib_dev->attrs))
+		return 0;
+
+	smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
+	if (!smb_dev)
+		return -ENOMEM;
+	smb_dev->ib_dev = ib_dev;
+
+	write_lock(&smb_direct_device_lock);
+	list_add(&smb_dev->list, &smb_direct_device_list);
+	write_unlock(&smb_direct_device_lock);
+
+	ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
+	return 0;
+}
+
+static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
+					void *client_data)
+{
+	struct smb_direct_device *smb_dev, *tmp;
+
+	write_lock(&smb_direct_device_lock);
+	list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
+		if (smb_dev->ib_dev == ib_dev) {
+			list_del(&smb_dev->list);
+			kfree(smb_dev);
+			break;
+		}
+	}
+	write_unlock(&smb_direct_device_lock);
+}
+
+static struct ib_client smb_direct_ib_client = {
+	.name	= "ksmbd_smb_direct_ib",
+};
+
 int ksmbd_rdma_init(void)
 {
 	int ret;
 
 	smb_direct_listener.cm_id = NULL;
 
+	smb_direct_ib_client.add = smb_direct_ib_client_add;
+	smb_direct_ib_client.remove = smb_direct_ib_client_remove;
+	ret = ib_register_client(&smb_direct_ib_client);
+	if (ret) {
+		pr_err("failed to ib_register_client\n");
+		return ret;
+	}
+
 	/* When a client is running out of send credits, the credits are
 	 * granted by the server's sending a packet using this queue.
 	 * This avoids the situation that a clients cannot send packets
@@ -2046,20 +2104,39 @@  int ksmbd_rdma_destroy(void)
 		destroy_workqueue(smb_direct_wq);
 		smb_direct_wq = NULL;
 	}
+
+	if (smb_direct_ib_client.add)
+		ib_unregister_client(&smb_direct_ib_client);
+	smb_direct_ib_client.add = NULL;
 	return 0;
 }
 
 bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
 {
-	struct ib_device *ibdev;
+	struct smb_direct_device *smb_dev;
+	int i;
 	bool rdma_capable = false;
 
-	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
-	if (ibdev) {
-		if (rdma_frwr_is_supported(&ibdev->attrs))
-			rdma_capable = true;
-		ib_device_put(ibdev);
+	read_lock(&smb_direct_device_lock);
+	list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
+		for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
+			struct net_device *ndev;
+
+			ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
+							       i + 1);
+			if (!ndev)
+				continue;
+
+			if (ndev == netdev) {
+				dev_put(ndev);
+				rdma_capable = true;
+				goto out;
+			}
+			dev_put(ndev);
+		}
 	}
+out:
+	read_unlock(&smb_direct_device_lock);
 	return rdma_capable;
 }