
[RFC,v2,09/13] ib/core: Enforce PKey security when modifying QPs

Message ID 1459985638-37233-10-git-send-email-danielj@mellanox.com (mailing list archive)
State Superseded

Commit Message

Daniel Jurgens April 6, 2016, 11:33 p.m. UTC
From: Daniel Jurgens <danielj@mellanox.com>

Allocate and free a security context when creating and destroying a QP.
This context is used for controlling access to PKeys.

When a request is made to modify a QP that changes the port, PKey index,
alternate path port, or alternate path PKey index, check that the QP has
permission for the PKey in the index on the subnet prefix of the port.
If the QP is shared make sure all handles to the QP also have access.

Store which port and pkey a QP is using.  After the reset to init
transition the user can modify the port and pkey independently.  This
adds a transactional aspect to modify QP when the port, pkey, or
alternate path change.  Permission must be checked for the new settings,
then the modify can be attempted.  If the modify fails the old setting
should be restored.

To keep the security changes isolated a new file is used to hold security
related functionality.

Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
Reviewed-by: Eli Cohen <eli@mellanox.com>
---
 drivers/infiniband/core/Makefile        |    2 +-
 drivers/infiniband/core/core_priv.h     |   41 ++++
 drivers/infiniband/core/core_security.c |  331 +++++++++++++++++++++++++++++++
 drivers/infiniband/core/uverbs_cmd.c    |   20 ++-
 drivers/infiniband/core/verbs.c         |   24 +++-
 include/rdma/ib_verbs.h                 |   28 +++-
 6 files changed, 439 insertions(+), 7 deletions(-)
 create mode 100644 drivers/infiniband/core/core_security.c
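
In outline, the enforcement flow described above is as follows (a simplified
sketch of ib_security_modify_qp from this patch, with the shared-QP handle
walk and most error handling omitted):

int ib_security_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
                          int qp_attr_mask, struct ib_udata *udata)
{
        bool enforce = affects_security_settings(qp, qp_attr, qp_attr_mask);
        int err = 0;

        if (enforce) {
                /* Take the QP security mutex, check the new port/pkey
                 * (and alternate path) against the security module, and
                 * stash the old settings so they can be restored.
                 */
                mutex_lock(&qp->qp_sec->mutex);
                err = qp_modify_enforce_security(qp, qp_attr, qp_attr_mask);
        }

        if (!err)
                err = qp->device->modify_qp(qp->real_qp, qp_attr,
                                            qp_attr_mask, udata);

        if (enforce) {
                /* On failure revert to the old port/pkey settings,
                 * otherwise mark the new settings valid.
                 */
                cleanup_qp_pkey_associations(qp, !!err);
                mutex_unlock(&qp->qp_sec->mutex);
        }
        return err;
}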

Comments

Leon Romanovsky April 7, 2016, 4:31 p.m. UTC | #1
On Thu, Apr 07, 2016 at 02:33:54AM +0300, Dan Jurgens wrote:
> From: Daniel Jurgens <danielj@mellanox.com>
> 
> Allocate and free a security context when creating and destroying a QP.
> This context is used for controlling access to PKeys.
> 
> When a request is made to modify a QP that changes the port, PKey index,
> alternate path port, or alternate path PKey index, check that the QP has
> permission for the PKey in the index on the subnet prefix of the port.
> If the QP is shared make sure all handles to the QP also have access.
> 
> Store which port and pkey a QP is using.  After the reset to init
> transition the user can modify the port and pkey independently.  This
> adds a transactional aspect to modify QP when the port, pkey, or
> alternate path change.  Permission must be checked for the new settings,
> then the modify can be attempted.  If the modify fails the old setting
> should be restored.
> 
> To keep the security changes isolated a new file is used to hold security
> related functionality.
> 
> Signed-off-by: Daniel Jurgens <danielj@mellanox.com>
> Reviewed-by: Eli Cohen <eli@mellanox.com>
> ---
>  drivers/infiniband/core/Makefile        |    2 +-
>  drivers/infiniband/core/core_priv.h     |   41 ++++
>  drivers/infiniband/core/core_security.c |  331 +++++++++++++++++++++++++++++++

We are already in core, there is no need to call files core_XXX.

>  drivers/infiniband/core/uverbs_cmd.c    |   20 ++-
>  drivers/infiniband/core/verbs.c         |   24 +++-
>  include/rdma/ib_verbs.h                 |   28 +++-
>  6 files changed, 439 insertions(+), 7 deletions(-)
>  create mode 100644 drivers/infiniband/core/core_security.c
> 
> diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
> index f818538..48a4013 100644
> --- a/drivers/infiniband/core/Makefile
> +++ b/drivers/infiniband/core/Makefile
> @@ -10,7 +10,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) +=	ib_uverbs.o ib_ucm.o \
>  
>  ib_core-y :=			packer.o ud_header.o verbs.o cq.o sysfs.o \
>  				device.o fmr_pool.o cache.o netlink.o \
> -				roce_gid_mgmt.o
> +				roce_gid_mgmt.o core_security.o
>  ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
>  ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
>  
> diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
> index 722b866..27f2fa8 100644
> --- a/drivers/infiniband/core/core_priv.h
> +++ b/drivers/infiniband/core/core_priv.h
> @@ -140,4 +140,45 @@ static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
>  int ib_get_cached_subnet_prefix(struct ib_device *device,
>  				u8                port_num,
>  				u64              *sn_pfx);
> +
> +#ifdef CONFIG_SECURITY_INFINIBAND
> +int ib_security_modify_qp(struct ib_qp *qp,
> +			  struct ib_qp_attr *qp_attr,
> +			  int qp_attr_mask,
> +			  struct ib_udata *udata);
> +
> +int ib_security_create_qp_security(struct ib_qp *qp);

Why do we need xx_SECURITY_xxxxx_SECURITY in name?

> +void ib_security_destroy_qp(struct ib_qp_security *sec);
> +int ib_security_open_shared_qp(struct ib_qp *qp);
> +void ib_security_close_shared_qp(struct ib_qp_security *sec);
> +#else
> +static inline int ib_security_modify_qp(struct ib_qp *qp,
> +					struct ib_qp_attr *qp_attr,
> +					int qp_attr_mask,
> +					struct ib_udata *udata)
> +{
> +	return qp->device->modify_qp(qp->real_qp,
> +				     qp_attr,
> +				     qp_attr_mask,
> +				     udata);
> +}
> +
> +static inline int ib_security_create_qp_security(struct ib_qp *qp)
> +{
> +	return 0;
> +}
> +
> +static inline void ib_security_destroy_qp(struct ib_qp_security *sec)
> +{
> +}
> +
> +static inline int ib_security_open_shared_qp(struct ib_qp *qp)
> +{
> +	return 0;
> +}
> +
> +static inline void ib_security_close_shared_qp(struct ib_qp_security *sec)
> +{
> +}
> +#endif
>  #endif /* _CORE_PRIV_H */
> diff --git a/drivers/infiniband/core/core_security.c b/drivers/infiniband/core/core_security.c
> new file mode 100644
> index 0000000..768edea
> --- /dev/null
> +++ b/drivers/infiniband/core/core_security.c
> @@ -0,0 +1,331 @@
> +/*
> + * Copyright (c) 2016 Mellanox Technologies Ltd.  All rights reserved.
> + *
> + * This software is available to you under a choice of one of two
> + * licenses.  You may choose to be licensed under the terms of the GNU
> + * General Public License (GPL) Version 2, available from the file
> + * COPYING in the main directory of this source tree, or the
> + * OpenIB.org BSD license below:
> + *
> + *     Redistribution and use in source and binary forms, with or
> + *     without modification, are permitted provided that the following
> + *     conditions are met:
> + *
> + *      - Redistributions of source code must retain the above
> + *        copyright notice, this list of conditions and the following
> + *        disclaimer.
> + *
> + *      - Redistributions in binary form must reproduce the above
> + *        copyright notice, this list of conditions and the following
> + *        disclaimer in the documentation and/or other materials
> + *        provided with the distribution.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
> + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
> + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
> + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
> + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
> + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
> + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
> + * SOFTWARE.
> + */
> +
> +#ifdef CONFIG_SECURITY_INFINIBAND
> +
> +#include <linux/security.h>
> +
> +#include <rdma/ib_verbs.h>
> +#include <rdma/ib_cache.h>
> +#include "core_priv.h"
> +
> +static int get_pkey_info(struct ib_device *dev,
> +			 u8 port_num,
> +			 u16 pkey_index,
> +			 u64 *subnet_prefix,
> +			 u16 *pkey)
> +{
> +	int err;
> +
> +	err = ib_get_cached_pkey(dev, port_num, pkey_index, pkey);
> +	if (err)
> +		return err;
> +
> +	err = ib_get_cached_subnet_prefix(dev, port_num, subnet_prefix);
> +
> +	return err;
> +}
> +
> +static int enforce_qp_pkey_security(struct ib_device *dev,
> +				    u8 port_num,
> +				    u16 pkey_index,
> +				    struct ib_qp_security *sec)
> +{
> +	struct ib_qp_security *shared_qp_sec;
> +	u64 subnet_prefix;
> +	int err = 0;
> +	u16 pkey;
> +
> +	err = get_pkey_info(dev, port_num, pkey_index, &subnet_prefix, &pkey);
> +	if (err)
> +		return err;
> +
> +	err = security_qp_pkey_access(subnet_prefix, pkey, sec);
> +	if (err)
> +		return err;
> +
> +	if (sec->qp == sec->qp->real_qp) {
> +		/* The caller of this function holds the QP security
> +		 * mutex so this list traversal is safe
> +		*/

Did the comment below pass checkpatch.pl?

> +		list_for_each_entry(shared_qp_sec,
> +				    &sec->shared_qp_list,
> +				    shared_qp_list) {

Is this list always needed to be protected by lock?
In general, I prefer to see lock/unlock operations near protected code.
Otherwise, it is hard to follow all lock/unlock paths.

> +			err = security_qp_pkey_access(subnet_prefix,
> +						      pkey,
> +						      shared_qp_sec);
> +			if (err)
> +				break;
> +		}
> +	}
> +	return err;
> +}
> +
> +static int check_pkey(const struct ib_qp *qp,
> +		      const struct ib_qp_attr *qp_attr,
> +		      int qp_attr_mask)
> +{
> +	bool check_pkey = !!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT));
> +
> +	return check_pkey && (qp->qp_num != IB_QPT_SMI &&
> +			      qp->qp_num != IB_QPT_GSI);

IB_QPT_SMI and IB_QPT_GSI are declared in enum ib_qp_type and are set
in qp->qp_type, not in qp->qp_num.

> +}
> +
> +static int check_alt_pkey(const struct ib_qp *qp,
> +			  const struct ib_qp_attr *qp_attr,
> +			  int qp_attr_mask)
> +{
> +	bool check_alt_pkey = !!(qp_attr_mask & IB_QP_ALT_PATH);
> +
> +	return check_alt_pkey && (qp->qp_num != IB_QPT_SMI &&
> +				  qp->qp_num != IB_QPT_GSI);
> +}

The same as above.

> +
> +static int affects_security_settings(const struct ib_qp *qp,
> +				     const struct ib_qp_attr *qp_attr,
> +				     int qp_attr_mask)
> +{
> +	return check_pkey(qp, qp_attr, qp_attr_mask) ||
> +	       check_alt_pkey(qp, qp_attr, qp_attr_mask);
> +}
> +
> +static void begin_port_pkey_change(struct ib_qp *qp,
> +				   struct ib_port_pkey *pp,
> +				   struct ib_port_pkey *old_pp,
> +				   u8 port_num,
> +				   u16 pkey_index)
> +{
> +	if (pp->state == IB_PORT_PKEY_NOT_VALID ||
> +	    (pkey_index != pp->pkey_index ||
> +	     port_num != pp->port_num)) {
> +		old_pp->pkey_index = pp->pkey_index;
> +		old_pp->port_num = pp->port_num;
> +		old_pp->state = pp->state;
> +
> +		pp->port_num = port_num;
> +		pp->pkey_index = pkey_index;
> +		pp->state = IB_PORT_PKEY_CHANGING;
> +	}
> +}
> +
> +static int qp_modify_enforce_security(struct ib_qp *qp,
> +				      const struct ib_qp_attr *qp_attr,
> +				      int qp_attr_mask)
> +{
> +	struct ib_qp_security *sec = qp->qp_sec;
> +	int err = 0;
> +
> +	if (check_pkey(qp, qp_attr, qp_attr_mask)) {
> +		u8 port_num = (qp_attr_mask & IB_QP_PORT) ?
> +			       qp_attr->port_num :
> +			       sec->ports_pkeys.main.port_num;
> +
> +		u16 pkey_index = (qp_attr_mask & IB_QP_PKEY_INDEX) ?
> +				  qp_attr->pkey_index :
> +				  sec->ports_pkeys.main.pkey_index;
> +
> +		err = enforce_qp_pkey_security(qp->device,
> +					       port_num,
> +					       pkey_index,
> +					       sec);
> +
> +		if (err)
> +			return err;
> +
> +		begin_port_pkey_change(qp,
> +				       &sec->ports_pkeys.main,
> +				       &sec->old_ports_pkeys.main,
> +				       port_num,
> +				       pkey_index);
> +	}
> +
> +	if (check_alt_pkey(qp, qp_attr, qp_attr_mask)) {
> +		err = enforce_qp_pkey_security(qp->device,
> +					       qp_attr->alt_port_num,
> +					       qp_attr->alt_pkey_index,
> +					       sec);
> +
> +		if (err)
> +			return err;
> +
> +		begin_port_pkey_change(qp,
> +				       &sec->ports_pkeys.alt,
> +				       &sec->old_ports_pkeys.alt,
> +				       qp_attr->alt_port_num,
> +				       qp_attr->alt_pkey_index);
> +	}
> +	return err;
> +}
> +
> +static void abort_port_pkey_change(struct ib_qp *qp,
> +				   struct ib_port_pkey *pp,
> +				   struct ib_port_pkey *old_pp)
> +{
> +	if (pp->state == IB_PORT_PKEY_CHANGING) {
> +		pp->pkey_index = old_pp->pkey_index;
> +		pp->port_num = old_pp->port_num;
> +		pp->state = old_pp->state;
> +	}
> +}
> +
> +static int cleanup_qp_pkey_associations(struct ib_qp *qp,
> +					bool revert_to_old)
> +{
> +	struct ib_qp_security *sec = qp->qp_sec;
> +
> +	if (revert_to_old) {
> +		abort_port_pkey_change(qp,
> +				       &qp->qp_sec->ports_pkeys.main,
> +				       &qp->qp_sec->old_ports_pkeys.main);
> +
> +		abort_port_pkey_change(qp,
> +				       &qp->qp_sec->ports_pkeys.alt,
> +				       &qp->qp_sec->old_ports_pkeys.alt);
> +	} else {
> +		if (sec->ports_pkeys.main.state == IB_PORT_PKEY_CHANGING)
> +			sec->ports_pkeys.main.state = IB_PORT_PKEY_VALID;
> +
> +		if (sec->ports_pkeys.alt.state == IB_PORT_PKEY_CHANGING)
> +			sec->ports_pkeys.alt.state = IB_PORT_PKEY_VALID;
> +	}
> +
> +	memset(&sec->old_ports_pkeys, 0, sizeof(sec->old_ports_pkeys));
> +
> +	return 0;
> +}
> +
> +int ib_security_open_shared_qp(struct ib_qp *qp)
> +{
> +	struct ib_qp *real_qp = qp->real_qp;
> +	int err;
> +
> +	err = ib_security_create_qp_security(qp);
> +	if (err)
> +		goto out;
> +
> +	mutex_lock(&real_qp->qp_sec->mutex);
> +
> +	if (real_qp->qp_sec->ports_pkeys.main.state != IB_PORT_PKEY_NOT_VALID)
> +		err = enforce_qp_pkey_security(real_qp->device,
> +					       real_qp->qp_sec->ports_pkeys.main.port_num,
> +					       real_qp->qp_sec->ports_pkeys.main.pkey_index,
> +					       qp->qp_sec);
> +	if (err)
> +		goto err;
> +
> +	if (real_qp->qp_sec->ports_pkeys.alt.state != IB_PORT_PKEY_NOT_VALID)
> +		err = enforce_qp_pkey_security(real_qp->device,
> +					       real_qp->qp_sec->ports_pkeys.alt.port_num,
> +					       real_qp->qp_sec->ports_pkeys.alt.pkey_index,
> +					       qp->qp_sec);
> +
> +	if (err)
> +		goto err;
> +
> +	if (qp != real_qp)
> +		list_add(&qp->qp_sec->shared_qp_list,
> +			 &real_qp->qp_sec->shared_qp_list);
> +err:
> +	mutex_unlock(&real_qp->qp_sec->mutex);
> +	if (err)
> +		ib_security_destroy_qp(qp->qp_sec);
> +
> +out:
> +	return err;
> +}
> +
> +void ib_security_close_shared_qp(struct ib_qp_security *sec)
> +{
> +	struct ib_qp *real_qp = sec->qp->real_qp;
> +
> +	mutex_lock(&real_qp->qp_sec->mutex);
> +	list_del(&sec->shared_qp_list);
> +	mutex_unlock(&real_qp->qp_sec->mutex);
> +
> +	ib_security_destroy_qp(sec);
> +}
> +
> +int ib_security_create_qp_security(struct ib_qp *qp)
> +{
> +	int err;
> +
> +	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
> +	if (!qp->qp_sec)
> +		return -ENOMEM;
> +
> +	qp->qp_sec->qp = qp;
> +	mutex_init(&qp->qp_sec->mutex);
> +	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
> +	err = security_ib_qp_alloc_security(qp->qp_sec);
> +	if (err)
> +		kfree(qp->qp_sec);
> +
> +	return err;
> +}
> +EXPORT_SYMBOL(ib_security_create_qp_security);
> +
> +void ib_security_destroy_qp(struct ib_qp_security *sec)
> +{
> +	security_ib_qp_free_security(sec);
> +	kfree(sec);
> +}

Did you want to EXPORT_SYMBOL here too?

> +
> +int ib_security_modify_qp(struct ib_qp *qp,
> +			  struct ib_qp_attr *qp_attr,
> +			  int qp_attr_mask,
> +			  struct ib_udata *udata)
> +{
> +	int err = 0;
> +	bool enforce_security = affects_security_settings(qp,
> +							  qp_attr,
> +							  qp_attr_mask);
> +
> +	if (enforce_security) {
> +		mutex_lock(&qp->qp_sec->mutex);
> +
> +		err = qp_modify_enforce_security(qp, qp_attr, qp_attr_mask);
> +	}
> +
> +	if (!err)
> +		err = qp->device->modify_qp(qp->real_qp,
> +					    qp_attr,
> +					    qp_attr_mask,
> +					    udata);
> +	if (enforce_security) {
> +		cleanup_qp_pkey_associations(qp, !!err);
> +		mutex_unlock(&qp->qp_sec->mutex);
> +	}
> +	return err;
> +}
> +EXPORT_SYMBOL(ib_security_modify_qp);
> +
> +#endif /* CONFIG_SECURITY_INFINIBAND */
> diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
> index 6fdc7ec..6df15ea 100644
> --- a/drivers/infiniband/core/uverbs_cmd.c
> +++ b/drivers/infiniband/core/uverbs_cmd.c
> @@ -37,7 +37,7 @@
>  #include <linux/fs.h>
>  #include <linux/slab.h>
>  #include <linux/sched.h>
> -
> +#include <linux/security.h>
>  #include <asm/uaccess.h>
>  
>  #include "uverbs.h"
> @@ -1857,6 +1857,10 @@ static int create_qp(struct ib_uverbs_file *file,
>  	}
>  
>  	if (cmd->qp_type != IB_QPT_XRC_TGT) {
> +		ret = ib_security_create_qp_security(qp);
> +		if (ret)
> +			goto err_destroy;
> +
>  		qp->real_qp	  = qp;
>  		qp->device	  = device;
>  		qp->pd		  = pd;
> @@ -2339,10 +2343,18 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
>  		ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
>  		if (ret)
>  			goto release_qp;
> -		ret = qp->device->modify_qp(qp, attr,
> -			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
> +
> +		ret = ib_security_modify_qp(qp,
> +					    attr,
> +					    modify_qp_mask(qp->qp_type,
> +							   cmd.attr_mask),
> +					    &udata);
>  	} else {
> -		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
> +		ret = ib_security_modify_qp(qp,
> +					    attr,
> +					    modify_qp_mask(qp->qp_type,
> +							   cmd.attr_mask),
> +					    NULL);
>  	}
>  
>  	if (ret)
> diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
> index 15b8adb..47000ee 100644
> --- a/drivers/infiniband/core/verbs.c
> +++ b/drivers/infiniband/core/verbs.c
> @@ -44,6 +44,7 @@
>  #include <linux/in.h>
>  #include <linux/in6.h>
>  #include <net/addrconf.h>
> +#include <linux/security.h>
>  
>  #include <rdma/ib_verbs.h>
>  #include <rdma/ib_cache.h>
> @@ -681,12 +682,19 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
>  {
>  	struct ib_qp *qp;
>  	unsigned long flags;
> +	int err;
>  
>  	qp = kzalloc(sizeof *qp, GFP_KERNEL);
>  	if (!qp)
>  		return ERR_PTR(-ENOMEM);
>  
>  	qp->real_qp = real_qp;
> +	err = ib_security_open_shared_qp(qp);
> +	if (err) {
> +		kfree(qp);
> +		return ERR_PTR(err);
> +	}
> +
>  	atomic_inc(&real_qp->usecnt);
>  	qp->device = real_qp->device;
>  	qp->event_handler = event_handler;
> @@ -728,11 +736,16 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
>  {
>  	struct ib_qp *qp, *real_qp;
>  	struct ib_device *device;
> +	int err;
>  
>  	device = pd ? pd->device : qp_init_attr->xrcd->device;
>  	qp = device->create_qp(pd, qp_init_attr, NULL);
>  
>  	if (!IS_ERR(qp)) {
> +		err = ib_security_create_qp_security(qp);
> +		if (err)
> +			goto destroy_qp;
> +
>  		qp->device     = device;
>  		qp->real_qp    = qp;
>  		qp->uobject    = NULL;
> @@ -780,6 +793,10 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
>  	}
>  
>  	return qp;
> +
> +destroy_qp:
> +	ib_destroy_qp(qp);
> +	return ERR_PTR(err);
>  }
>  EXPORT_SYMBOL(ib_create_qp);
>  
> @@ -1180,7 +1197,7 @@ int ib_modify_qp(struct ib_qp *qp,
>  	if (ret)
>  		return ret;
>  
> -	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
> +	return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
>  }
>  EXPORT_SYMBOL(ib_modify_qp);
>  
> @@ -1209,6 +1226,7 @@ int ib_close_qp(struct ib_qp *qp)
>  	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
>  
>  	atomic_dec(&real_qp->usecnt);
> +	ib_security_close_shared_qp(qp->qp_sec);
>  	kfree(qp);
>  
>  	return 0;
> @@ -1248,6 +1266,7 @@ int ib_destroy_qp(struct ib_qp *qp)
>  	struct ib_pd *pd;
>  	struct ib_cq *scq, *rcq;
>  	struct ib_srq *srq;
> +	struct ib_qp_security *sec;
>  	int ret;
>  
>  	if (atomic_read(&qp->usecnt))
> @@ -1260,6 +1279,7 @@ int ib_destroy_qp(struct ib_qp *qp)
>  	scq  = qp->send_cq;
>  	rcq  = qp->recv_cq;
>  	srq  = qp->srq;
> +	sec  = qp->qp_sec;
>  
>  	ret = qp->device->destroy_qp(qp);
>  	if (!ret) {
> @@ -1271,6 +1291,8 @@ int ib_destroy_qp(struct ib_qp *qp)
>  			atomic_dec(&rcq->usecnt);
>  		if (srq)
>  			atomic_dec(&srq->usecnt);
> +		if (sec)
> +			ib_security_destroy_qp(sec);
>  	}
>  
>  	return ret;
> diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
> index 870c5ac..f71cb47 100644
> --- a/include/rdma/ib_verbs.h
> +++ b/include/rdma/ib_verbs.h
> @@ -1416,8 +1416,34 @@ struct ib_srq {
>  	} ext;
>  };
>  
> +enum port_pkey_state {
> +	IB_PORT_PKEY_NOT_VALID = 0,
> +	IB_PORT_PKEY_VALID = 1,
> +	IB_PORT_PKEY_CHANGING = 2,
> +};
> +
> +struct ib_port_pkey {
> +	enum port_pkey_state	state;
> +	u16			pkey_index;
> +	u8			port_num;
> +};
> +
> +struct ib_ports_pkeys {
> +	struct ib_port_pkey	main;
> +	struct ib_port_pkey	alt;
> +};
> +
>  struct ib_qp_security {
> -	void *q_security;
> +	struct ib_qp	       *qp;
> +	/* Hold this mutex when changing port and pkey settings. */
> +	struct mutex		mutex;
> +	struct ib_ports_pkeys	ports_pkeys;
> +	struct ib_ports_pkeys	old_ports_pkeys;
> +	/* A list of all open shared QP handles.  Required to enforce security
> +	 * properly for all users of a shared QP.
> +	 */
> +	struct list_head        shared_qp_list;
> +	void                   *q_security;
>  };
>  
>  struct ib_qp {
> -- 
> 1.7.1
> 
Daniel Jurgens April 7, 2016, 5:03 p.m. UTC | #2
On 4/7/2016 11:31 AM, Leon Romanovsky wrote:
> On Thu, Apr 07, 2016 at 02:33:54AM +0300, Dan Jurgens wrote:
>> From: Daniel Jurgens <danielj@mellanox.com>
>>  drivers/infiniband/core/core_priv.h     |   41 ++++
>>  drivers/infiniband/core/core_security.c |  331 +++++++++++++++++++++++++++++++
> 
> We are already in core, there is no need to call files core_XXX.

I can change core_security.c to security.c.  core_priv.h already existed
so I won't change that in this patch.

>> +int ib_security_create_qp_security(struct ib_qp *qp);
> 
> Why do we need xx_SECURITY_xxxxx_SECURITY in name?

I had called it ib_security_create_qp.  Eli thought that implied more
work than it actually does.  All the non-static functions are named
ib_security_xxx, which I kind of liked as a convention, but I can
abandon that and go with ib_create_qp_security.

>> +	return check_pkey && (qp->qp_num != IB_QPT_SMI &&
>> +			      qp->qp_num != IB_QPT_GSI);
> 
>> IB_QPT_SMI and IB_QPT_GSI are declared in enum ib_qp_type and are set
>> in qp->qp_type, not in qp->qp_num.
>> +}
>> +
>> +static int check_alt_pkey(const struct ib_qp *qp,
>> +			  const struct ib_qp_attr *qp_attr,
>> +			  int qp_attr_mask)
>> +{
>> +	bool check_alt_pkey = !!(qp_attr_mask & IB_QP_ALT_PATH);
>> +
>> +	return check_alt_pkey && (qp->qp_num != IB_QPT_SMI &&
>> +				  qp->qp_num != IB_QPT_GSI);
>> +}
> 
> The same as above.

I'll fix this.

>> +		kfree(qp->qp_sec);
>> +
>> +	return err;
>> +}
>> +EXPORT_SYMBOL(ib_security_create_qp_security);
>> +
>> +void ib_security_destroy_qp(struct ib_qp_security *sec)
>> +{
>> +	security_ib_qp_free_security(sec);
>> +	kfree(sec);
>> +}
> 
> Did you want to EXPORT_SYMBOL here too?
> 

It's not called from outside ib_core; I only exported the functions that
are.  ib_security_modify_qp and ib_security_create_qp_security are called
from ib_core and ib_uverbs.  ib_security_enforce_mad_agent_pkey_access
is called from ib_mad.


Leon Romanovsky April 7, 2016, 5:39 p.m. UTC | #3
On Thu, Apr 07, 2016 at 05:03:50PM +0000, Daniel Jurgens wrote:
> On 4/7/2016 11:31 AM, Leon Romanovsky wrote:
> > On Thu, Apr 07, 2016 at 02:33:54AM +0300, Dan Jurgens wrote:
> >> From: Daniel Jurgens <danielj@mellanox.com>
> >> +		kfree(qp->qp_sec);
> >> +
> >> +	return err;
> >> +}
> >> +EXPORT_SYMBOL(ib_security_create_qp_security);
> >> +
> >> +void ib_security_destroy_qp(struct ib_qp_security *sec)
> >> +{
> >> +	security_ib_qp_free_security(sec);
> >> +	kfree(sec);
> >> +}
> > 
> > Did you want to EXPORT_SYMBOL here too?
> > 
> 
> It's not called from outside ib_core; I only exported the functions that
> are.  ib_security_modify_qp and ib_security_create_qp_security are called
> from ib_core and ib_uverbs.  ib_security_enforce_mad_agent_pkey_access
> is called from ib_mad.

So, how are you releasing the memory which is taken by
ib_security_create_qp_security ?

> 
>
Daniel Jurgens April 7, 2016, 5:44 p.m. UTC | #4
On 4/7/2016 12:40 PM, leon@leon.nu wrote:
> On Thu, Apr 07, 2016 at 05:03:50PM +0000, Daniel Jurgens wrote:
>> On 4/7/2016 11:31 AM, Leon Romanovsky wrote:
>>> On Thu, Apr 07, 2016 at 02:33:54AM +0300, Dan Jurgens wrote:
>>>> From: Daniel Jurgens <danielj@mellanox.com>
>>>> +		kfree(qp->qp_sec);
>>>> +
>>>> +	return err;
>>>> +}
>>>> +EXPORT_SYMBOL(ib_security_create_qp_security);
>>>> +
>>>> +void ib_security_destroy_qp(struct ib_qp_security *sec)
>>>> +{
>>>> +	security_ib_qp_free_security(sec);
>>>> +	kfree(sec);
>>>> +}
>>>
>>> Did you want to EXPORT_SYMBOL here too?
>>>
>>
>> It's not called from outside ib_core; I only exported the functions that
>> are.  ib_security_modify_qp and ib_security_create_qp_security are called
>> from ib_core and ib_uverbs.  ib_security_enforce_mad_agent_pkey_access
>> is called from ib_mad.
> 
> So, how are you releasing the memory which is taken by
> ib_security_create_qp_security ?

It is freed in ib_security_destroy_qp, which is called by ib_destroy_qp
after the QP is destroyed successfully.
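
For reference, the relevant teardown path in this patch (trimmed from
ib_destroy_qp in verbs.c):

        sec = qp->qp_sec;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                /* ... drop the pd/cq/srq references ... */
                if (sec)
                        ib_security_destroy_qp(sec); /* LSM free + kfree */
        }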


Daniel Jurgens April 7, 2016, 9:02 p.m. UTC | #5
On 4/7/2016 11:31 AM, Leon Romanovsky wrote:
> On Thu, Apr 07, 2016 at 02:33:54AM +0300, Dan Jurgens wrote:
> 
>> +	if (sec->qp == sec->qp->real_qp) {
>> +		/* The caller of this function holds the QP security
>> +		 * mutex so this list traversal is safe
>> +		*/
> 
> Did the comment below pass checkpatch.pl?

It did.

>> +		list_for_each_entry(shared_qp_sec,
>> +				    &sec->shared_qp_list,
>> +				    shared_qp_list) {
> 
> Is this list always needed to be protected by lock?
> In general, I prefer to see lock/unlock operations near protected code.
> Otherwise, it is hard to follow all lock/unlock paths.

The mutex is required, and I'm not sure how to push it lower safely.
Consider the situation where a QP is being modified concurrently with an
open_shared_qp. Unless the lock is held the entire time for each
operation, open_shared_qp could check permission using an ib_qp_security
struct in a partially modified state, i.e. there could be a mix of new
and old settings for the port/pkey index/alternate path.  Shared QPs are
checked against the port and pkey settings of the underlying real QP,
but with the shared QP's own q_security.
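
Concretely, ib_security_open_shared_qp() in this patch takes the real QP's
security mutex before reading its ports_pkeys, so a modify that dropped the
mutex mid-update could let the shared handle be checked against a
half-changed main/alt pair (excerpt, alternate-path check and error paths
trimmed):

        mutex_lock(&real_qp->qp_sec->mutex);

        if (real_qp->qp_sec->ports_pkeys.main.state != IB_PORT_PKEY_NOT_VALID)
                err = enforce_qp_pkey_security(real_qp->device,
                                               real_qp->qp_sec->ports_pkeys.main.port_num,
                                               real_qp->qp_sec->ports_pkeys.main.pkey_index,
                                               qp->qp_sec);

        if (qp != real_qp)
                list_add(&qp->qp_sec->shared_qp_list,
                         &real_qp->qp_sec->shared_qp_list);

        mutex_unlock(&real_qp->qp_sec->mutex);
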
Leon Romanovsky April 7, 2016, 9:10 p.m. UTC | #6
On Thu, Apr 07, 2016 at 09:02:43PM +0000, Daniel Jurgens wrote:
> On 4/7/2016 11:31 AM, Leon Romanovsky wrote:
> > On Thu, Apr 07, 2016 at 02:33:54AM +0300, Dan Jurgens wrote:
> > 
> >> +	if (sec->qp == sec->qp->real_qp) {
> >> +		/* The caller of this function holds the QP security
> >> +		 * mutex so this list traversal is safe
> >> +		*/
> > 
> > Did the comment below pass checkpatch.pl?
> 
> It did.
> 
> >> +		list_for_each_entry(shared_qp_sec,
> >> +				    &sec->shared_qp_list,
> >> +				    shared_qp_list) {
> > 
> > Is this list always needed to be protected by lock?
> > In general, I prefer to see lock/unlock operations near protected code.
> > Otherwise, it is hard to follow all lock/unlock paths.
> 
> The mutex is required, and I'm not sure how to push it lower safely.
> Consider the situation where a QP is being modified concurrently with an
> open_shared_qp. Unless the lock is held the entire time for each
> operation, open_shared_qp could check permission using an ib_qp_security
> struct in a partially modified state, i.e. there could be a mix of new
> and old settings for the port/pkey index/alternate path.  Shared QPs are
> checked against the port and pkey settings of the underlying real QP,
> but with the shared QP's own q_security.

Common flow is to do everything in one place:
1. lock
2. check permissions
3. change
4. release lock

Your flow is much broader.
1. lock
2. check permission
3. do work
4. change
5. do work
6. release lock.
Daniel Jurgens April 7, 2016, 9:23 p.m. UTC | #7
On 4/7/2016 4:11 PM, leon@leon.nu wrote:
> On Thu, Apr 07, 2016 at 09:02:43PM +0000, Daniel Jurgens wrote:
>> On 4/7/2016 11:31 AM, Leon Romanovsky wrote:
>>> On Thu, Apr 07, 2016 at 02:33:54AM +0300, Dan Jurgens wrote:
>>>
>>>> +	if (sec->qp == sec->qp->real_qp) {
>>>> +		/* The caller of this function holds the QP security
>>>> +		 * mutex so this list traversal is safe
>>>> +		*/
>>>
>>> Did the comment below pass checkpatch.pl?
>>
>> It did.
>>
>>>> +		list_for_each_entry(shared_qp_sec,
>>>> +				    &sec->shared_qp_list,
>>>> +				    shared_qp_list) {
>>>
>>> Is this list always needed to be protected by lock?
>>> In general, I prefer to see lock/unlock operations near protected code.
>>> Otherwise, it is hard to follow all lock/unlock paths.
>>
>> The mutex is required, and I'm not sure how to push it lower safely.
>> Consider the situation where a QP is being modified concurrently with an
>> open_shared_qp. Unless the lock is held the entire time for each
>> operation, open_shared_qp could check permission using an ib_qp_security
>> struct in a partially modified state, i.e. there could be a mix of new
>> and old settings for the port/pkey index/alternate path.  Shared QPs are
>> checked against the port and pkey settings of the underlying real QP,
>> but with the shared QP's own q_security.
> 
> Common flow is to do everything in one place:
> 1. lock
> 2. check permissions
> 3. change
> 4. release lock
> 
> Your flow is much broader.
> 1. lock
> 2. check permission
> 3. do work
> 4. change
> 5. do work
> 6. release lock.
> 

I see what you're saying.  This is an artifact of me breaking this up into
two patches after the fact.  "[RFC PATCH v2 12/13] ib/core: Track which
QPs are using which port and PKey index" introduces the reason this is
needed.  In that patch, lists of QP security structures are maintained
and walked when there are changes to the pkey table or GIDs.  Those list
operations don't hold the QP security mutexes for all the QPs, so they
can run concurrently with a QP modify.  This means the QP needs to be
added to the lists for the new settings, and the lists cleaned up after
the modify finishes.

I can change this patch to the common flow above, and then introduce "my
flow" in the patch that requires it.  Another option would be to squash
the two together.  They are both relatively large so I am hesitant to do
that.



Leon Romanovsky April 7, 2016, 11:24 p.m. UTC | #8
On Thu, Apr 07, 2016 at 09:23:04PM +0000, Daniel Jurgens wrote:
> I can change this patch to the common flow above, and then introduce "my
> flow" in the patch that requires it.  Another option would be to squash
> the two together.  They are both relatively large so I am hesitant to do
> that.

It is up to you. There are two things which you can take into account
while doing it: correct code and easy review.

Thanks.

Patch

diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index f818538..48a4013 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -10,7 +10,7 @@  obj-$(CONFIG_INFINIBAND_USER_ACCESS) +=	ib_uverbs.o ib_ucm.o \
 
 ib_core-y :=			packer.o ud_header.o verbs.o cq.o sysfs.o \
 				device.o fmr_pool.o cache.o netlink.o \
-				roce_gid_mgmt.o
+				roce_gid_mgmt.o core_security.o
 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
 ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
 
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 722b866..27f2fa8 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -140,4 +140,45 @@  static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
 int ib_get_cached_subnet_prefix(struct ib_device *device,
 				u8                port_num,
 				u64              *sn_pfx);
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+int ib_security_modify_qp(struct ib_qp *qp,
+			  struct ib_qp_attr *qp_attr,
+			  int qp_attr_mask,
+			  struct ib_udata *udata);
+
+int ib_security_create_qp_security(struct ib_qp *qp);
+void ib_security_destroy_qp(struct ib_qp_security *sec);
+int ib_security_open_shared_qp(struct ib_qp *qp);
+void ib_security_close_shared_qp(struct ib_qp_security *sec);
+#else
+static inline int ib_security_modify_qp(struct ib_qp *qp,
+					struct ib_qp_attr *qp_attr,
+					int qp_attr_mask,
+					struct ib_udata *udata)
+{
+	return qp->device->modify_qp(qp->real_qp,
+				     qp_attr,
+				     qp_attr_mask,
+				     udata);
+}
+
+static inline int ib_security_create_qp_security(struct ib_qp *qp)
+{
+	return 0;
+}
+
+static inline void ib_security_destroy_qp(struct ib_qp_security *sec)
+{
+}
+
+static inline int ib_security_open_shared_qp(struct ib_qp *qp)
+{
+	return 0;
+}
+
+static inline void ib_security_close_shared_qp(struct ib_qp_security *sec)
+{
+}
+#endif
 #endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/core_security.c b/drivers/infiniband/core/core_security.c
new file mode 100644
index 0000000..768edea
--- /dev/null
+++ b/drivers/infiniband/core/core_security.c
@@ -0,0 +1,331 @@ 
+/*
+ * Copyright (c) 2016 Mellanox Technologies Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_SECURITY_INFINIBAND
+
+#include <linux/security.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_cache.h>
+#include "core_priv.h"
+
+static int get_pkey_info(struct ib_device *dev,
+			 u8 port_num,
+			 u16 pkey_index,
+			 u64 *subnet_prefix,
+			 u16 *pkey)
+{
+	int err;
+
+	err = ib_get_cached_pkey(dev, port_num, pkey_index, pkey);
+	if (err)
+		return err;
+
+	err = ib_get_cached_subnet_prefix(dev, port_num, subnet_prefix);
+
+	return err;
+}
+
+static int enforce_qp_pkey_security(struct ib_device *dev,
+				    u8 port_num,
+				    u16 pkey_index,
+				    struct ib_qp_security *sec)
+{
+	struct ib_qp_security *shared_qp_sec;
+	u64 subnet_prefix;
+	int err = 0;
+	u16 pkey;
+
+	err = get_pkey_info(dev, port_num, pkey_index, &subnet_prefix, &pkey);
+	if (err)
+		return err;
+
+	err = security_qp_pkey_access(subnet_prefix, pkey, sec);
+	if (err)
+		return err;
+
+	if (sec->qp == sec->qp->real_qp) {
+		/* The caller of this function holds the QP security
+		 * mutex so this list traversal is safe
+		*/
+		list_for_each_entry(shared_qp_sec,
+				    &sec->shared_qp_list,
+				    shared_qp_list) {
+			err = security_qp_pkey_access(subnet_prefix,
+						      pkey,
+						      shared_qp_sec);
+			if (err)
+				break;
+		}
+	}
+	return err;
+}
+
+static int check_pkey(const struct ib_qp *qp,
+		      const struct ib_qp_attr *qp_attr,
+		      int qp_attr_mask)
+{
+	bool check_pkey = !!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT));
+
+	return check_pkey && (qp->qp_num != IB_QPT_SMI &&
+			      qp->qp_num != IB_QPT_GSI);
+}
+
+static int check_alt_pkey(const struct ib_qp *qp,
+			  const struct ib_qp_attr *qp_attr,
+			  int qp_attr_mask)
+{
+	bool check_alt_pkey = !!(qp_attr_mask & IB_QP_ALT_PATH);
+
+	return check_alt_pkey && (qp->qp_num != IB_QPT_SMI &&
+				  qp->qp_num != IB_QPT_GSI);
+}
+
+static int affects_security_settings(const struct ib_qp *qp,
+				     const struct ib_qp_attr *qp_attr,
+				     int qp_attr_mask)
+{
+	return check_pkey(qp, qp_attr, qp_attr_mask) ||
+	       check_alt_pkey(qp, qp_attr, qp_attr_mask);
+}
+
+static void begin_port_pkey_change(struct ib_qp *qp,
+				   struct ib_port_pkey *pp,
+				   struct ib_port_pkey *old_pp,
+				   u8 port_num,
+				   u16 pkey_index)
+{
+	if (pp->state == IB_PORT_PKEY_NOT_VALID ||
+	    (pkey_index != pp->pkey_index ||
+	     port_num != pp->port_num)) {
+		old_pp->pkey_index = pp->pkey_index;
+		old_pp->port_num = pp->port_num;
+		old_pp->state = pp->state;
+
+		pp->port_num = port_num;
+		pp->pkey_index = pkey_index;
+		pp->state = IB_PORT_PKEY_CHANGING;
+	}
+}
+
+static int qp_modify_enforce_security(struct ib_qp *qp,
+				      const struct ib_qp_attr *qp_attr,
+				      int qp_attr_mask)
+{
+	struct ib_qp_security *sec = qp->qp_sec;
+	int err = 0;
+
+	if (check_pkey(qp, qp_attr, qp_attr_mask)) {
+		u8 port_num = (qp_attr_mask & IB_QP_PORT) ?
+			       qp_attr->port_num :
+			       sec->ports_pkeys.main.port_num;
+
+		u16 pkey_index = (qp_attr_mask & IB_QP_PKEY_INDEX) ?
+				  qp_attr->pkey_index :
+				  sec->ports_pkeys.main.pkey_index;
+
+		err = enforce_qp_pkey_security(qp->device,
+					       port_num,
+					       pkey_index,
+					       sec);
+
+		if (err)
+			return err;
+
+		begin_port_pkey_change(qp,
+				       &sec->ports_pkeys.main,
+				       &sec->old_ports_pkeys.main,
+				       port_num,
+				       pkey_index);
+	}
+
+	if (check_alt_pkey(qp, qp_attr, qp_attr_mask)) {
+		err = enforce_qp_pkey_security(qp->device,
+					       qp_attr->alt_port_num,
+					       qp_attr->alt_pkey_index,
+					       sec);
+
+		if (err)
+			return err;
+
+		begin_port_pkey_change(qp,
+				       &sec->ports_pkeys.alt,
+				       &sec->old_ports_pkeys.alt,
+				       qp_attr->alt_port_num,
+				       qp_attr->alt_pkey_index);
+	}
+	return err;
+}
+
+static void abort_port_pkey_change(struct ib_qp *qp,
+				   struct ib_port_pkey *pp,
+				   struct ib_port_pkey *old_pp)
+{
+	if (pp->state == IB_PORT_PKEY_CHANGING) {
+		pp->pkey_index = old_pp->pkey_index;
+		pp->port_num = old_pp->port_num;
+		pp->state = old_pp->state;
+	}
+}
+
+static int cleanup_qp_pkey_associations(struct ib_qp *qp,
+					bool revert_to_old)
+{
+	struct ib_qp_security *sec = qp->qp_sec;
+
+	if (revert_to_old) {
+		abort_port_pkey_change(qp,
+				       &qp->qp_sec->ports_pkeys.main,
+				       &qp->qp_sec->old_ports_pkeys.main);
+
+		abort_port_pkey_change(qp,
+				       &qp->qp_sec->ports_pkeys.alt,
+				       &qp->qp_sec->old_ports_pkeys.alt);
+	} else {
+		if (sec->ports_pkeys.main.state == IB_PORT_PKEY_CHANGING)
+			sec->ports_pkeys.main.state = IB_PORT_PKEY_VALID;
+
+		if (sec->ports_pkeys.alt.state == IB_PORT_PKEY_CHANGING)
+			sec->ports_pkeys.alt.state = IB_PORT_PKEY_VALID;
+	}
+
+	memset(&sec->old_ports_pkeys, 0, sizeof(sec->old_ports_pkeys));
+
+	return 0;
+}
+
+int ib_security_open_shared_qp(struct ib_qp *qp)
+{
+	struct ib_qp *real_qp = qp->real_qp;
+	int err;
+
+	err = ib_security_create_qp_security(qp);
+	if (err)
+		goto out;
+
+	mutex_lock(&real_qp->qp_sec->mutex);
+
+	if (real_qp->qp_sec->ports_pkeys.main.state != IB_PORT_PKEY_NOT_VALID)
+		err = enforce_qp_pkey_security(real_qp->device,
+					       real_qp->qp_sec->ports_pkeys.main.port_num,
+					       real_qp->qp_sec->ports_pkeys.main.pkey_index,
+					       qp->qp_sec);
+	if (err)
+		goto err;
+
+	if (real_qp->qp_sec->ports_pkeys.alt.state != IB_PORT_PKEY_NOT_VALID)
+		err = enforce_qp_pkey_security(real_qp->device,
+					       real_qp->qp_sec->ports_pkeys.alt.port_num,
+					       real_qp->qp_sec->ports_pkeys.alt.pkey_index,
+					       qp->qp_sec);
+
+	if (err)
+		goto err;
+
+	if (qp != real_qp)
+		list_add(&qp->qp_sec->shared_qp_list,
+			 &real_qp->qp_sec->shared_qp_list);
+err:
+	mutex_unlock(&real_qp->qp_sec->mutex);
+	if (err)
+		ib_security_destroy_qp(qp->qp_sec);
+
+out:
+	return err;
+}
+
+void ib_security_close_shared_qp(struct ib_qp_security *sec)
+{
+	struct ib_qp *real_qp = sec->qp->real_qp;
+
+	mutex_lock(&real_qp->qp_sec->mutex);
+	list_del(&sec->shared_qp_list);
+	mutex_unlock(&real_qp->qp_sec->mutex);
+
+	ib_security_destroy_qp(sec);
+}
+
+int ib_security_create_qp_security(struct ib_qp *qp)
+{
+	int err;
+
+	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
+	if (!qp->qp_sec)
+		return -ENOMEM;
+
+	qp->qp_sec->qp = qp;
+	mutex_init(&qp->qp_sec->mutex);
+	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
+	err = security_ib_qp_alloc_security(qp->qp_sec);
+	if (err)
+		kfree(qp->qp_sec);
+
+	return err;
+}
+EXPORT_SYMBOL(ib_security_create_qp_security);
+
+void ib_security_destroy_qp(struct ib_qp_security *sec)
+{
+	security_ib_qp_free_security(sec);
+	kfree(sec);
+}
+
+int ib_security_modify_qp(struct ib_qp *qp,
+			  struct ib_qp_attr *qp_attr,
+			  int qp_attr_mask,
+			  struct ib_udata *udata)
+{
+	int err = 0;
+	bool enforce_security = affects_security_settings(qp,
+							  qp_attr,
+							  qp_attr_mask);
+
+	if (enforce_security) {
+		mutex_lock(&qp->qp_sec->mutex);
+
+		err = qp_modify_enforce_security(qp, qp_attr, qp_attr_mask);
+	}
+
+	if (!err)
+		err = qp->device->modify_qp(qp->real_qp,
+					    qp_attr,
+					    qp_attr_mask,
+					    udata);
+	if (enforce_security) {
+		cleanup_qp_pkey_associations(qp, !!err);
+		mutex_unlock(&qp->qp_sec->mutex);
+	}
+	return err;
+}
+EXPORT_SYMBOL(ib_security_modify_qp);
+
+#endif /* CONFIG_SECURITY_INFINIBAND */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6fdc7ec..6df15ea 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -37,7 +37,7 @@ 
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-
+#include <linux/security.h>
 #include <asm/uaccess.h>
 
 #include "uverbs.h"
@@ -1857,6 +1857,10 @@  static int create_qp(struct ib_uverbs_file *file,
 	}
 
 	if (cmd->qp_type != IB_QPT_XRC_TGT) {
+		ret = ib_security_create_qp_security(qp);
+		if (ret)
+			goto err_destroy;
+
 		qp->real_qp	  = qp;
 		qp->device	  = device;
 		qp->pd		  = pd;
@@ -2339,10 +2343,18 @@  ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
 		ret = ib_resolve_eth_dmac(qp, attr, &cmd.attr_mask);
 		if (ret)
 			goto release_qp;
-		ret = qp->device->modify_qp(qp, attr,
-			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
+
+		ret = ib_security_modify_qp(qp,
+					    attr,
+					    modify_qp_mask(qp->qp_type,
+							   cmd.attr_mask),
+					    &udata);
 	} else {
-		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
+		ret = ib_security_modify_qp(qp,
+					    attr,
+					    modify_qp_mask(qp->qp_type,
+							   cmd.attr_mask),
+					    NULL);
 	}
 
 	if (ret)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 15b8adb..47000ee 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -44,6 +44,7 @@ 
 #include <linux/in.h>
 #include <linux/in6.h>
 #include <net/addrconf.h>
+#include <linux/security.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
@@ -681,12 +682,19 @@  static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
 {
 	struct ib_qp *qp;
 	unsigned long flags;
+	int err;
 
 	qp = kzalloc(sizeof *qp, GFP_KERNEL);
 	if (!qp)
 		return ERR_PTR(-ENOMEM);
 
 	qp->real_qp = real_qp;
+	err = ib_security_open_shared_qp(qp);
+	if (err) {
+		kfree(qp);
+		return ERR_PTR(err);
+	}
+
 	atomic_inc(&real_qp->usecnt);
 	qp->device = real_qp->device;
 	qp->event_handler = event_handler;
@@ -728,11 +736,16 @@  struct ib_qp *ib_create_qp(struct ib_pd *pd,
 {
 	struct ib_qp *qp, *real_qp;
 	struct ib_device *device;
+	int err;
 
 	device = pd ? pd->device : qp_init_attr->xrcd->device;
 	qp = device->create_qp(pd, qp_init_attr, NULL);
 
 	if (!IS_ERR(qp)) {
+		err = ib_security_create_qp_security(qp);
+		if (err)
+			goto destroy_qp;
+
 		qp->device     = device;
 		qp->real_qp    = qp;
 		qp->uobject    = NULL;
@@ -780,6 +793,10 @@  struct ib_qp *ib_create_qp(struct ib_pd *pd,
 	}
 
 	return qp;
+
+destroy_qp:
+	ib_destroy_qp(qp);
+	return ERR_PTR(err);
 }
 EXPORT_SYMBOL(ib_create_qp);
 
@@ -1180,7 +1197,7 @@  int ib_modify_qp(struct ib_qp *qp,
 	if (ret)
 		return ret;
 
-	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
+	return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
 }
 EXPORT_SYMBOL(ib_modify_qp);
 
@@ -1209,6 +1226,7 @@  int ib_close_qp(struct ib_qp *qp)
 	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
 
 	atomic_dec(&real_qp->usecnt);
+	ib_security_close_shared_qp(qp->qp_sec);
 	kfree(qp);
 
 	return 0;
@@ -1248,6 +1266,7 @@  int ib_destroy_qp(struct ib_qp *qp)
 	struct ib_pd *pd;
 	struct ib_cq *scq, *rcq;
 	struct ib_srq *srq;
+	struct ib_qp_security *sec;
 	int ret;
 
 	if (atomic_read(&qp->usecnt))
@@ -1260,6 +1279,7 @@  int ib_destroy_qp(struct ib_qp *qp)
 	scq  = qp->send_cq;
 	rcq  = qp->recv_cq;
 	srq  = qp->srq;
+	sec  = qp->qp_sec;
 
 	ret = qp->device->destroy_qp(qp);
 	if (!ret) {
@@ -1271,6 +1291,8 @@  int ib_destroy_qp(struct ib_qp *qp)
 			atomic_dec(&rcq->usecnt);
 		if (srq)
 			atomic_dec(&srq->usecnt);
+		if (sec)
+			ib_security_destroy_qp(sec);
 	}
 
 	return ret;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 870c5ac..f71cb47 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1416,8 +1416,34 @@  struct ib_srq {
 	} ext;
 };
 
+enum port_pkey_state {
+	IB_PORT_PKEY_NOT_VALID = 0,
+	IB_PORT_PKEY_VALID = 1,
+	IB_PORT_PKEY_CHANGING = 2,
+};
+
+struct ib_port_pkey {
+	enum port_pkey_state	state;
+	u16			pkey_index;
+	u8			port_num;
+};
+
+struct ib_ports_pkeys {
+	struct ib_port_pkey	main;
+	struct ib_port_pkey	alt;
+};
+
 struct ib_qp_security {
-	void *q_security;
+	struct ib_qp	       *qp;
+	/* Hold this mutex when changing port and pkey settings. */
+	struct mutex		mutex;
+	struct ib_ports_pkeys	ports_pkeys;
+	struct ib_ports_pkeys	old_ports_pkeys;
+	/* A list of all open shared QP handles.  Required to enforce security
+	 * properly for all users of a shared QP.
+	 */
+	struct list_head        shared_qp_list;
+	void                   *q_security;
 };
 
 struct ib_qp {