[for-next,v2,3/4] RDMA/nldev: ib_pd can be pointed by multiple ib_ucontext

Message ID 20190520075333.6002-4-shamir.rabinovitch@oracle.com (mailing list archive)
State Superseded
Series ib_pd should not have ib_uobject

Commit Message

Shamir Rabinovitch May 20, 2019, 7:53 a.m. UTC
In the shared object model an ib_pd can belong to one or more ib_ucontexts.
Fix the nldev code so that it can report multiple context ids.

Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
---
 drivers/infiniband/core/nldev.c | 93 +++++++++++++++++++++++++++++++--
 1 file changed, 88 insertions(+), 5 deletions(-)

Comments

Leon Romanovsky May 20, 2019, 9:18 a.m. UTC | #1
On Mon, May 20, 2019 at 10:53:20AM +0300, Shamir Rabinovitch wrote:
> In shared object model ib_pd can belong to 1 or more ib_ucontext.
> Fix the nldev code so it could report multiple context ids.
>
> Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
> ---
>  drivers/infiniband/core/nldev.c | 93 +++++++++++++++++++++++++++++++--
>  1 file changed, 88 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
> index cbd712f5f8b2..f4cc92b897ff 100644
> --- a/drivers/infiniband/core/nldev.c
> +++ b/drivers/infiniband/core/nldev.c
> @@ -41,6 +41,9 @@
>  #include "core_priv.h"
>  #include "cma_priv.h"
>  #include "restrack.h"
> +#include "uverbs.h"
> +
> +static bool is_visible_in_pid_ns(struct rdma_restrack_entry *res);

Mark needed it too.
https://patchwork.kernel.org/patch/10921419/

>
>  static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
>  	[RDMA_NLDEV_ATTR_DEV_INDEX]     = { .type = NLA_U32 },
> @@ -584,11 +587,80 @@ static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
>  err:	return -EMSGSIZE;
>  }
>
> +struct context_id {
> +	struct list_head list;
> +	u32 id;
> +};
> +
> +static void pd_context(struct ib_pd *pd, struct list_head *list)
> +{
> +	struct ib_device *device = pd->device;
> +	struct rdma_restrack_entry *res;
> +	struct rdma_restrack_root *rt;
> +	struct ib_uverbs_file *ufile;
> +	struct ib_ucontext *ucontext;
> +	struct ib_uobject *uobj;
> +	unsigned long flags;
> +	unsigned long id;
> +	bool found;
> +
> +	rt = &device->res[RDMA_RESTRACK_CTX];
> +
> +	xa_lock(&rt->xa);
> +
> +	xa_for_each(&rt->xa, id, res) {
> +		if (!is_visible_in_pid_ns(res))
> +			continue;
> +
> +		if (!rdma_restrack_get(res))
> +			continue;
> +
> +		xa_unlock(&rt->xa);
> +
> +		ucontext = container_of(res, struct ib_ucontext, res);
> +		ufile = ucontext->ufile;
> +		found = false;
> +
> +		/* See locking requirements in struct ib_uverbs_file */
> +		down_read(&ufile->hw_destroy_rwsem);
> +		spin_lock_irqsave(&ufile->uobjects_lock, flags);
> +
> +		list_for_each_entry(uobj, &ufile->uobjects, list) {
> +			if (uobj->object == pd) {
> +				found = true;
> +				goto found;
> +			}
> +		}
> +
> +found:		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
> +		up_read(&ufile->hw_destroy_rwsem);
> +
> +		if (found) {
> +			struct context_id *ctx_id =
> +				kmalloc(sizeof(*ctx_id), GFP_KERNEL);
> +
> +			if (WARN_ON_ONCE(!ctx_id))
> +				goto next;
> +
> +			ctx_id->id = ucontext->res.id;
> +			list_add(&ctx_id->list, list);
> +		}
> +
> +next:		rdma_restrack_put(res);
> +		xa_lock(&rt->xa);
> +	}
> +
> +	xa_unlock(&rt->xa);
> +}
> +
>  static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
>  			     struct rdma_restrack_entry *res, uint32_t port)
>  {
>  	struct ib_pd *pd = container_of(res, struct ib_pd, res);
>  	struct ib_device *dev = pd->device;
> +	struct context_id *ctx_id;
> +	struct context_id *tmp;
> +	LIST_HEAD(pd_context_ids);
>
>  	if (has_cap_net_admin) {
>  		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
> @@ -606,10 +678,14 @@ static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
>  	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
>  		goto err;
>
> -	if (!rdma_is_kernel_res(res) &&
> -	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
> -			pd->uobject->context->res.id))
> -		goto err;
> +	if (!rdma_is_kernel_res(res)) {
> +		pd_context(pd, &pd_context_ids);
> +		list_for_each_entry(ctx_id, &pd_context_ids, list) {
> +			if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
> +				ctx_id->id))

Did it work? You are overwriting RDMA_NLDEV_ATTR_RES_CTXN entry in the
loop. You need to add RDMA_NLDEV_ATTR_RES_CTX and
RDMA_NLDEV_ATTR_RES_CTX_ENTRY to include/uapi/rdma_netlink.h and
open nested table here (inside of PD) with list of contexts.
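
A minimal sketch of the nesting suggested here, assuming
RDMA_NLDEV_ATTR_RES_CTX and RDMA_NLDEV_ATTR_RES_CTX_ENTRY are added to
the uapi header as described (the helper name is hypothetical and this
is not the code that was eventually merged; it builds on the standard
nla_nest_start()/nla_nest_end() helpers from <net/netlink.h>):

static int fill_pd_context_ids(struct sk_buff *msg, struct list_head *ids)
{
	struct nlattr *table, *entry;
	struct context_id *ctx_id;

	/* One RDMA_NLDEV_ATTR_RES_CTX table per PD entry... */
	table = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CTX);
	if (!table)
		return -EMSGSIZE;

	list_for_each_entry(ctx_id, ids, list) {
		/* ...holding one RDMA_NLDEV_ATTR_RES_CTX_ENTRY per context. */
		entry = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CTX_ENTRY);
		if (!entry)
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx_id->id))
			goto err;
		nla_nest_end(msg, entry);
	}

	nla_nest_end(msg, table);
	return 0;

err:
	/* nla_nest_cancel() trims any partially written table. */
	nla_nest_cancel(msg, table);
	return -EMSGSIZE;
}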

> +				goto err;
> +		}
> +	}
>
>  	if (fill_res_name_pid(msg, res))
>  		goto err;
> @@ -617,9 +693,16 @@ static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
>  	if (fill_res_entry(dev, msg, res))
>  		goto err;
>
> +	list_for_each_entry_safe(ctx_id, tmp, &pd_context_ids, list)
> +		kfree(ctx_id);
> +
>  	return 0;
>
> -err:	return -EMSGSIZE;
> +err:
> +	list_for_each_entry_safe(ctx_id, tmp, &pd_context_ids, list)
> +		kfree(ctx_id);
> +
> +	return -EMSGSIZE;
>  }
>
>  static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
> --
> 2.20.1
>
Shamir Rabinovitch May 20, 2019, 11:50 a.m. UTC | #2
On Mon, May 20, 2019 at 12:18:40PM +0300, Leon Romanovsky wrote:
> On Mon, May 20, 2019 at 10:53:20AM +0300, Shamir Rabinovitch wrote:
> > In shared object model ib_pd can belong to 1 or more ib_ucontext.
> > Fix the nldev code so it could report multiple context ids.
> >
> > Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
> > ---
> >  drivers/infiniband/core/nldev.c | 93 +++++++++++++++++++++++++++++++--
> >  1 file changed, 88 insertions(+), 5 deletions(-)
> >
> > diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
> > index cbd712f5f8b2..f4cc92b897ff 100644
> > --- a/drivers/infiniband/core/nldev.c
> > +++ b/drivers/infiniband/core/nldev.c
> > @@ -41,6 +41,9 @@
> >  #include "core_priv.h"
> >  #include "cma_priv.h"
> >  #include "restrack.h"
> > +#include "uverbs.h"
> > +
> > +static bool is_visible_in_pid_ns(struct rdma_restrack_entry *res);
> 
> Mark needed it too.
> https://patchwork.kernel.org/patch/10921419/
> 

I see. So following Mark's patch the above hunk is not needed.
Should I apply Mark's patch before this series?

> > +	if (!rdma_is_kernel_res(res)) {
> > +		pd_context(pd, &pd_context_ids);
> > +		list_for_each_entry(ctx_id, &pd_context_ids, list) {
> > +			if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
> > +				ctx_id->id))
> 
> Did it work? You are overwriting RDMA_NLDEV_ATTR_RES_CTXN entry in the
> loop. You need to add RDMA_NLDEV_ATTR_RES_CTX and
> RDMA_NLDEV_ATTR_RES_CTX_ENTRY to include/uapi/rdma_netlink.h and
> open nested table here (inside of PD) with list of contexts.

I tested with only one context per PD (what we have today). Thanks for
the comment. I'll try to follow what you wrote here.
Leon Romanovsky May 20, 2019, 12:05 p.m. UTC | #3
On Mon, May 20, 2019 at 02:50:10PM +0300, Shamir Rabinovitch wrote:
> On Mon, May 20, 2019 at 12:18:40PM +0300, Leon Romanovsky wrote:
> > On Mon, May 20, 2019 at 10:53:20AM +0300, Shamir Rabinovitch wrote:
> > > In shared object model ib_pd can belong to 1 or more ib_ucontext.
> > > Fix the nldev code so it could report multiple context ids.
> > >
> > > Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
> > > ---
> > >  drivers/infiniband/core/nldev.c | 93 +++++++++++++++++++++++++++++++--
> > >  1 file changed, 88 insertions(+), 5 deletions(-)
> > >
> > > diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
> > > index cbd712f5f8b2..f4cc92b897ff 100644
> > > --- a/drivers/infiniband/core/nldev.c
> > > +++ b/drivers/infiniband/core/nldev.c
> > > @@ -41,6 +41,9 @@
> > >  #include "core_priv.h"
> > >  #include "cma_priv.h"
> > >  #include "restrack.h"
> > > +#include "uverbs.h"
> > > +
> > > +static bool is_visible_in_pid_ns(struct rdma_restrack_entry *res);
> >
> > Mark needed it too.
> > https://patchwork.kernel.org/patch/10921419/
> >
>
> I see. So follow Mark patch the above hunk is not needed.
> Should I apply Mark patch before this series?

No, you should continue with your series; just be aware that once Mark's
patches are merged your series will need a small update.

>
> > > +	if (!rdma_is_kernel_res(res)) {
> > > +		pd_context(pd, &pd_context_ids);
> > > +		list_for_each_entry(ctx_id, &pd_context_ids, list) {
> > > +			if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
> > > +				ctx_id->id))
> >
> > Did it work? You are overwriting RDMA_NLDEV_ATTR_RES_CTXN entry in the
> > loop. You need to add RDMA_NLDEV_ATTR_RES_CTX and
> > RDMA_NLDEV_ATTR_RES_CTX_ENTRY to include/uapi/rdma_netlink.h and
> > open nested table here (inside of PD) with list of contexts.
>
> I tested with only 1 context per pd (what we have today). Thanks for
> comment. I'll try to follow what you wrote here.
>
Shamir Rabinovitch May 20, 2019, 3:37 p.m. UTC | #4
On Mon, May 20, 2019 at 03:05:29PM +0300, Leon Romanovsky wrote:
> On Mon, May 20, 2019 at 02:50:10PM +0300, Shamir Rabinovitch wrote:
> > On Mon, May 20, 2019 at 12:18:40PM +0300, Leon Romanovsky wrote:
> > > On Mon, May 20, 2019 at 10:53:20AM +0300, Shamir Rabinovitch wrote:
> 
> >
> > > > +	if (!rdma_is_kernel_res(res)) {
> > > > +		pd_context(pd, &pd_context_ids);
> > > > +		list_for_each_entry(ctx_id, &pd_context_ids, list) {
> > > > +			if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
> > > > +				ctx_id->id))
> > >
> > > Did it work? You are overwriting RDMA_NLDEV_ATTR_RES_CTXN entry in the
> > > loop. You need to add RDMA_NLDEV_ATTR_RES_CTX and
> > > RDMA_NLDEV_ATTR_RES_CTX_ENTRY to include/uapi/rdma_netlink.h and
> > > open nested table here (inside of PD) with list of contexts.
> >
> > I tested with only 1 context per pd (what we have today). Thanks for
> > comment. I'll try to follow what you wrote here.
> >

Leon, what is your expectation here? I see 2 options:

1. The code builds the same NL message as today in the case of a single
context id, and a nested table in the case of multiple context ids.

2. The code always builds a nested table with the context id(s).

If we take option (1) we can postpone the matching iproute2 commits to
some extent, but if we take option (2) I guess I have to add the iproute2
commits as well - right?

Also, what's the best way to add the changes to both trees, given the
option you choose above?

Thanks
Leon Romanovsky May 20, 2019, 4:07 p.m. UTC | #5
On Mon, May 20, 2019 at 06:37:39PM +0300, Shamir Rabinovitch wrote:
> On Mon, May 20, 2019 at 03:05:29PM +0300, Leon Romanovsky wrote:
> > On Mon, May 20, 2019 at 02:50:10PM +0300, Shamir Rabinovitch wrote:
> > > On Mon, May 20, 2019 at 12:18:40PM +0300, Leon Romanovsky wrote:
> > > > On Mon, May 20, 2019 at 10:53:20AM +0300, Shamir Rabinovitch wrote:
> >
> > >
> > > > > +	if (!rdma_is_kernel_res(res)) {
> > > > > +		pd_context(pd, &pd_context_ids);
> > > > > +		list_for_each_entry(ctx_id, &pd_context_ids, list) {
> > > > > +			if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
> > > > > +				ctx_id->id))
> > > >
> > > > Did it work? You are overwriting RDMA_NLDEV_ATTR_RES_CTXN entry in the
> > > > loop. You need to add RDMA_NLDEV_ATTR_RES_CTX and
> > > > RDMA_NLDEV_ATTR_RES_CTX_ENTRY to include/uapi/rdma_netlink.h and
> > > > open nested table here (inside of PD) with list of contexts.
> > >
> > > I tested with only 1 context per pd (what we have today). Thanks for
> > > comment. I'll try to follow what you wrote here.
> > >
>
> Leon, what is your expectation here? I see 2 options:
>
> 1. Code will build same NL message in case of single context id and
> nested table in case of multiple context ids
>
> 2. Code always build nested table with context id(s)
>
> If taking option (1) we can postpone the iproute2 matching commits to
> some extent but if we take option (2) I guess I have to add iproute2
> commits as well - right?

Yes, I imagined option 1 back then. The nested table inside the PD will
clearly mark it as a "shared PD", and rdmatool already handles the
situation where no CTX is provided.

So you will have three cases (see the sketch below):
1. CTX is provided, no nesting - user space PD - rdmatool supports it.
2. CTX is not provided, no nesting - kernel space PD - rdmatool supports it by simply not printing the context number.
3. CTX is not provided, nested table exists - shared PD - legacy rdmatool will think that we are in #2, and new
rdmatool will print the list. The nested table should exist whenever the PD is in shared mode. I don't know whether
a shared PD can have just one context ID, but either way it needs to go in the nested table.
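
A sketch of how the three cases above might be wired into
fill_res_pd_entry() (the `shared` flag and both helper names are
hypothetical; fill_pd_context_ids() is the nested-table sketch from
earlier in the thread):

static int fill_pd_contexts(struct sk_buff *msg, bool shared,
			    struct list_head *ids)
{
	struct context_id *ctx_id;

	/* Case 2 (kernel PD): callers simply never reach this helper. */

	if (!shared) {
		/* Case 1: plain user space PD keeps the flat CTXN attribute. */
		ctx_id = list_first_entry(ids, struct context_id, list);
		return nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx_id->id);
	}

	/*
	 * Case 3: a shared PD always gets the nested table, even with a
	 * single context id; legacy rdmatool is expected to treat this
	 * like case 2 and print nothing.
	 */
	return fill_pd_context_ids(msg, ids);
}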

>
> Also, what's the best way to add the changes in both given what you
> choose above?

Patch plan?

>
> Thanks
Shamir Rabinovitch May 22, 2019, 12:25 p.m. UTC | #6
On Mon, May 20, 2019 at 12:18:40PM +0300, Leon Romanovsky wrote:
> On Mon, May 20, 2019 at 10:53:20AM +0300, Shamir Rabinovitch wrote:
> > In shared object model ib_pd can belong to 1 or more ib_ucontext.
> > Fix the nldev code so it could report multiple context ids.
> >
> > Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
> > ---

[...]

> > +	if (!rdma_is_kernel_res(res)) {
> > +		pd_context(pd, &pd_context_ids);
> > +		list_for_each_entry(ctx_id, &pd_context_ids, list) {
> > +			if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
> > +				ctx_id->id))
> 
> Did it work? You are overwriting RDMA_NLDEV_ATTR_RES_CTXN entry in the
> loop. You need to add RDMA_NLDEV_ATTR_RES_CTX and
> RDMA_NLDEV_ATTR_RES_CTX_ENTRY to include/uapi/rdma_netlink.h and
> open nested table here (inside of PD) with list of contexts.
> 
> > +				goto err;
> > +		}

Hi Leon,

Just to clarify the above nesting...

Do you expect the below NL attribute nesting in case of shared pd dump?

RDMA_NLDEV_ATTR_RES_CTX
	RDMA_NLDEV_ATTR_RES_CTX_ENTRY
		RDMA_NLDEV_ATTR_RES_CTXN #1
		RDMA_NLDEV_ATTR_RES_CTXN #2
		...
		RDMA_NLDEV_ATTR_RES_CTXN #N


I tried this and rdmatool reported:

[root@qemu-fc29 iproute2]# rdma/rdma res show pd dev mlx4_0
dev mlx4_0 pdn 0 local_dma_lkey 0x8000 users 4 comm [ib_core]
dev mlx4_0 pdn 1 local_dma_lkey 0x8000 users 4 comm [ib_core]
error: Operation not supported

Is this the expected behaviour from unmodified latest rdmatool?

Thanks
Shamir Rabinovitch May 28, 2019, 12:33 p.m. UTC | #7
On Wed, May 22, 2019 at 03:25:32PM +0300, Shamir Rabinovitch wrote:
> On Mon, May 20, 2019 at 12:18:40PM +0300, Leon Romanovsky wrote:
> > On Mon, May 20, 2019 at 10:53:20AM +0300, Shamir Rabinovitch wrote:
> > > In shared object model ib_pd can belong to 1 or more ib_ucontext.
> > > Fix the nldev code so it could report multiple context ids.
> > >
> > > Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
> > > ---
> 
> [...]
> 
> > > +	if (!rdma_is_kernel_res(res)) {
> > > +		pd_context(pd, &pd_context_ids);
> > > +		list_for_each_entry(ctx_id, &pd_context_ids, list) {
> > > +			if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
> > > +				ctx_id->id))
> > 
> > Did it work? You are overwriting RDMA_NLDEV_ATTR_RES_CTXN entry in the
> > loop. You need to add RDMA_NLDEV_ATTR_RES_CTX and
> > RDMA_NLDEV_ATTR_RES_CTX_ENTRY to include/uapi/rdma_netlink.h and
> > open nested table here (inside of PD) with list of contexts.
> > 
> > > +				goto err;
> > > +		}
> 
> Hi Leon,
> 
> Just to clarify the above nesting...
> 
> Do you expect the below NL attribute nesting in case of shared pd dump?
> 
> RDMA_NLDEV_ATTR_RES_CTX
> 	RDMA_NLDEV_ATTR_RES_CTX_ENTRY
> 		RDMA_NLDEV_ATTR_RES_CTXN #1
> 		RDMA_NLDEV_ATTR_RES_CTXN #2
> 		...
> 		RDMA_NLDEV_ATTR_RES_CTXN #N
> 
> 
> I tried this and rdmatool reported:
> 
> [root@qemu-fc29 iproute2]# rdma/rdma res show pd dev mlx4_0
> dev mlx4_0 pdn 0 local_dma_lkey 0x8000 users 4 comm [ib_core]
> dev mlx4_0 pdn 1 local_dma_lkey 0x8000 users 4 comm [ib_core]
> error: Operation not supported
> 
> Is this the expected behaviour from unmodified latest rdmatool?
> 
> Thanks

Leon, 

I tried this nesting (which makes more sense to me) and the results are
the same as above.

RDMA_NLDEV_ATTR_RES_CTX
	RDMA_NLDEV_ATTR_RES_CTX_ENTRY
		RDMA_NLDEV_ATTR_RES_CTXN
	RDMA_NLDEV_ATTR_RES_CTX_ENTRY
		RDMA_NLDEV_ATTR_RES_CTXN
...

Which nesting do you expect?

Is it OK that we get the rdmatool "error: Operation not supported"?

Thanks
Leon Romanovsky May 28, 2019, 1:17 p.m. UTC | #8
On Tue, May 28, 2019 at 03:33:26PM +0300, Shamir Rabinovitch wrote:
> On Wed, May 22, 2019 at 03:25:32PM +0300, Shamir Rabinovitch wrote:
> > On Mon, May 20, 2019 at 12:18:40PM +0300, Leon Romanovsky wrote:
> > > On Mon, May 20, 2019 at 10:53:20AM +0300, Shamir Rabinovitch wrote:
> > > > In shared object model ib_pd can belong to 1 or more ib_ucontext.
> > > > Fix the nldev code so it could report multiple context ids.
> > > >
> > > > Signed-off-by: Shamir Rabinovitch <shamir.rabinovitch@oracle.com>
> > > > ---
> >
> > [...]
> >
> > > > +	if (!rdma_is_kernel_res(res)) {
> > > > +		pd_context(pd, &pd_context_ids);
> > > > +		list_for_each_entry(ctx_id, &pd_context_ids, list) {
> > > > +			if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
> > > > +				ctx_id->id))
> > >
> > > Did it work? You are overwriting RDMA_NLDEV_ATTR_RES_CTXN entry in the
> > > loop. You need to add RDMA_NLDEV_ATTR_RES_CTX and
> > > RDMA_NLDEV_ATTR_RES_CTX_ENTRY to include/uapi/rdma_netlink.h and
> > > open nested table here (inside of PD) with list of contexts.
> > >
> > > > +				goto err;
> > > > +		}
> >
> > Hi Leon,
> >
> > Just to clarify the above nesting...
> >
> > Do you expect the below NL attribute nesting in case of shared pd dump?
> >
> > RDMA_NLDEV_ATTR_RES_CTX
> > 	RDMA_NLDEV_ATTR_RES_CTX_ENTRY
> > 		RDMA_NLDEV_ATTR_RES_CTXN #1
> > 		RDMA_NLDEV_ATTR_RES_CTXN #2
> > 		...
> > 		RDMA_NLDEV_ATTR_RES_CTXN #N
> >
> >
> > I tried this and rdmatool reported:
> >
> > [root@qemu-fc29 iproute2]# rdma/rdma res show pd dev mlx4_0
> > dev mlx4_0 pdn 0 local_dma_lkey 0x8000 users 4 comm [ib_core]
> > dev mlx4_0 pdn 1 local_dma_lkey 0x8000 users 4 comm [ib_core]
> > error: Operation not supported
> >
> > Is this the expected behaviour from unmodified latest rdmatool?
> >
> > Thanks
>
> Leon,
>
> I tried this nesting (which make more sense to me) and results are the
> same as above.
>
> RDMA_NLDEV_ATTR_RES_CTX
> 	RDMA_NLDEV_ATTR_RES_CTX_ENTRY
> 		RDMA_NLDEV_ATTR_RES_CTXN
> 	RDMA_NLDEV_ATTR_RES_CTX_ENTRY
> 		RDMA_NLDEV_ATTR_RES_CTXN
> ...
>
> Which is the nesting you expect ?

Sorry, I was OOO and am slowly catching up on all the emails.

This latest variant looks right to me.

>
> Is it OK that we get the rdma tool "error: Operation not supported" ?

Definitely not. I don't see the origin of that error; it didn't come from rdmatool.

Thanks

>
> Thanks

Patch

diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index cbd712f5f8b2..f4cc92b897ff 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -41,6 +41,9 @@ 
 #include "core_priv.h"
 #include "cma_priv.h"
 #include "restrack.h"
+#include "uverbs.h"
+
+static bool is_visible_in_pid_ns(struct rdma_restrack_entry *res);
 
 static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
 	[RDMA_NLDEV_ATTR_DEV_INDEX]     = { .type = NLA_U32 },
@@ -584,11 +587,80 @@  static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin,
 err:	return -EMSGSIZE;
 }
 
+struct context_id {
+	struct list_head list;
+	u32 id;
+};
+
+static void pd_context(struct ib_pd *pd, struct list_head *list)
+{
+	struct ib_device *device = pd->device;
+	struct rdma_restrack_entry *res;
+	struct rdma_restrack_root *rt;
+	struct ib_uverbs_file *ufile;
+	struct ib_ucontext *ucontext;
+	struct ib_uobject *uobj;
+	unsigned long flags;
+	unsigned long id;
+	bool found;
+
+	rt = &device->res[RDMA_RESTRACK_CTX];
+
+	xa_lock(&rt->xa);
+
+	xa_for_each(&rt->xa, id, res) {
+		if (!is_visible_in_pid_ns(res))
+			continue;
+
+		if (!rdma_restrack_get(res))
+			continue;
+
+		xa_unlock(&rt->xa);
+
+		ucontext = container_of(res, struct ib_ucontext, res);
+		ufile = ucontext->ufile;
+		found = false;
+
+		/* See locking requirements in struct ib_uverbs_file */
+		down_read(&ufile->hw_destroy_rwsem);
+		spin_lock_irqsave(&ufile->uobjects_lock, flags);
+
+		list_for_each_entry(uobj, &ufile->uobjects, list) {
+			if (uobj->object == pd) {
+				found = true;
+				goto found;
+			}
+		}
+
+found:		spin_unlock_irqrestore(&ufile->uobjects_lock, flags);
+		up_read(&ufile->hw_destroy_rwsem);
+
+		if (found) {
+			struct context_id *ctx_id =
+				kmalloc(sizeof(*ctx_id), GFP_KERNEL);
+
+			if (WARN_ON_ONCE(!ctx_id))
+				goto next;
+
+			ctx_id->id = ucontext->res.id;
+			list_add(&ctx_id->list, list);
+		}
+
+next:		rdma_restrack_put(res);
+		xa_lock(&rt->xa);
+	}
+
+	xa_unlock(&rt->xa);
+}
+
 static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
 			     struct rdma_restrack_entry *res, uint32_t port)
 {
 	struct ib_pd *pd = container_of(res, struct ib_pd, res);
 	struct ib_device *dev = pd->device;
+	struct context_id *ctx_id;
+	struct context_id *tmp;
+	LIST_HEAD(pd_context_ids);
 
 	if (has_cap_net_admin) {
 		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
@@ -606,10 +678,14 @@  static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id))
 		goto err;
 
-	if (!rdma_is_kernel_res(res) &&
-	    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
-			pd->uobject->context->res.id))
-		goto err;
+	if (!rdma_is_kernel_res(res)) {
+		pd_context(pd, &pd_context_ids);
+		list_for_each_entry(ctx_id, &pd_context_ids, list) {
+			if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN,
+				ctx_id->id))
+				goto err;
+		}
+	}
 
 	if (fill_res_name_pid(msg, res))
 		goto err;
@@ -617,9 +693,16 @@  static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin,
 	if (fill_res_entry(dev, msg, res))
 		goto err;
 
+	list_for_each_entry_safe(ctx_id, tmp, &pd_context_ids, list)
+		kfree(ctx_id);
+
 	return 0;
 
-err:	return -EMSGSIZE;
+err:
+	list_for_each_entry_safe(ctx_id, tmp, &pd_context_ids, list)
+		kfree(ctx_id);
+
+	return -EMSGSIZE;
 }
 
 static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,