@@ -745,6 +745,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
u8 port_num;
struct ib_wc mad_wc;
struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
+ size_t in_mad_size = max_mad_size(mad_agent_priv->qp_info->port_priv);
+ size_t out_mad_size;
if (device->node_type == RDMA_NODE_IB_SWITCH &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
@@ -781,8 +783,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
local->mad_priv = NULL;
local->recv_mad_agent = NULL;
- mad_priv = alloc_mad_priv(max_mad_size(mad_agent_priv->qp_info->port_priv),
- GFP_ATOMIC);
+ out_mad_size = max_mad_size(mad_agent_priv->qp_info->port_priv);
+ mad_priv = alloc_mad_priv(out_mad_size, GFP_ATOMIC);
if (!mad_priv) {
ret = -ENOMEM;
dev_err(&device->dev, "No memory for local response MAD\n");
@@ -797,8 +799,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
/* No GRH for DR SMP */
ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
- (struct ib_mad *)smp,
- (struct ib_mad *)&mad_priv->mad);
+ (struct ib_mad_hdr *)smp, in_mad_size,
+ (struct ib_mad_hdr *)&mad_priv->mad,
+ &out_mad_size);
switch (ret)
{
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -2017,6 +2020,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
struct ib_mad_agent_private *mad_agent;
int port_num;
int ret = IB_MAD_RESULT_SUCCESS;
+ size_t resp_mad_size;
mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id;
qp_info = mad_list->mad_queue->qp_info;
@@ -2044,7 +2048,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
if (!validate_mad(&recv->mad.mad.mad_hdr, qp_info->qp->qp_num))
goto out;
- response = alloc_mad_priv(max_mad_size(port_priv), GFP_KERNEL);
+ resp_mad_size = max_mad_size(port_priv);
+ response = alloc_mad_priv(resp_mad_size, GFP_KERNEL);
if (!response) {
dev_err(&port_priv->device->dev,
"ib_mad_recv_done_handler no memory for response buffer\n");
@@ -2069,8 +2074,10 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
ret = port_priv->device->process_mad(port_priv->device, 0,
port_priv->port_num,
wc, &recv->grh,
- &recv->mad.mad,
- &response->mad.mad);
+ (struct ib_mad_hdr *)&recv->mad.mad,
+ max_mad_size(port_priv),
+ (struct ib_mad_hdr *)&response->mad.mad,
+ &resp_mad_size);
if (ret & IB_MAD_RESULT_SUCCESS) {
if (ret & IB_MAD_RESULT_CONSUMED)
goto out;
@@ -2693,8 +2700,9 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
mad_priv = mad;
mad = NULL;
} else {
- mad_priv =
- alloc_mad_priv(max_mad_size(qp_info->port_priv), GFP_KERNEL);
+ size_t mad_size = max_mad_size(qp_info->port_priv);
+
+ mad_priv = alloc_mad_priv(mad_size, GFP_KERNEL);
if (!mad_priv) {
dev_err(&qp_info->port_priv->device->dev,
"No memory for receive buffer\n");
@@ -326,6 +326,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
int width = (tab_attr->index >> 16) & 0xff;
struct ib_mad *in_mad = NULL;
struct ib_mad *out_mad = NULL;
+ size_t out_mad_size = sizeof(*out_mad);
ssize_t ret;
if (!p->ibdev->process_mad)
@@ -347,7 +348,9 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
in_mad->data[41] = p->port_num; /* PortSelect field */
if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY,
- p->port_num, NULL, NULL, in_mad, out_mad) &
+ p->port_num, NULL, NULL,
+ (struct ib_mad_hdr *)in_mad, sizeof(*in_mad),
+ (struct ib_mad_hdr *)out_mad, &out_mad_size) &
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
ret = -EINVAL;
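The sysfs hunk above is typical of every caller after the conversion: the input length is passed by value and the output length by reference, so a driver could in principle report a differently sized reply. The caller-side sketch below restates that convention outside the diff; example_query() and its locals are illustrative only, while ->process_mad, IB_MAD_IGNORE_MKEY and the IB_MAD_RESULT_* flags come from the patch and the rdma headers.

#include <linux/errno.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>

/* Illustrative caller of the reworked ->process_mad() hook (not part of the
 * patch): in_mad and out_mad are preallocated 256-byte MADs. */
static int example_query(struct ib_device *ibdev, u8 port_num,
                         struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        size_t out_mad_size = sizeof(*out_mad);
        int ret;

        ret = ibdev->process_mad(ibdev, IB_MAD_IGNORE_MKEY, port_num,
                                 NULL, NULL,
                                 (struct ib_mad_hdr *)in_mad, sizeof(*in_mad),
                                 (struct ib_mad_hdr *)out_mad, &out_mad_size);

        /* The device must both accept the request and generate a reply. */
        if ((ret & (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
            (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY))
                return -EINVAL;

        return 0;
}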
@@ -584,7 +584,10 @@ static int c2_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size)
{
pr_debug("%s:%u\n", __func__, __LINE__);
return -ENOSYS;
@@ -87,7 +87,10 @@ static int iwch_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size)
{
return -ENOSYS;
}
@@ -81,8 +81,11 @@ static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
u8 port_num, struct ib_wc *in_wc,
- struct ib_grh *in_grh, struct ib_mad *in_mad,
- struct ib_mad *out_mad)
+ struct ib_grh *in_grh,
+ struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size)
{
return -ENOSYS;
}
@@ -192,8 +192,8 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
void ehca_poll_eqs(unsigned long data);
@@ -218,9 +218,16 @@ perf_reply:
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
int ret;
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (WARN_ON(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
return IB_MAD_RESULT_FAILURE;
@@ -1491,9 +1491,16 @@ bail:
*/
int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
int ret;
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (WARN_ON(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
@@ -703,7 +703,8 @@ int ipath_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
/*
* Compare the lower 24 bits of the two values.
@@ -868,8 +868,16 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (WARN_ON(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
+
switch (rdma_port_get_link_layer(ibdev, port_num)) {
case IB_LINK_LAYER_INFINIBAND:
return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
@@ -710,7 +710,8 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
@@ -59,10 +59,17 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
u16 slid;
int err;
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (WARN_ON(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
@@ -587,7 +587,8 @@ int mlx5_ib_unmap_fmr(struct list_head *fmr_list);
int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata);
@@ -578,8 +578,8 @@ int mthca_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
int mthca_create_agents(struct mthca_dev *dev);
void mthca_free_agents(struct mthca_dev *dev);
@@ -198,13 +198,19 @@ int mthca_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
int err;
u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (WARN_ON(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
@@ -3222,7 +3222,8 @@ static int nes_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
*/
static int nes_process_mad(struct ib_device *ibdev, int mad_flags,
u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
nes_debug(NES_DBG_INIT, "\n");
return -ENOSYS;
@@ -198,10 +198,17 @@ int ocrdma_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
int status;
struct ocrdma_dev *dev;
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (WARN_ON(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_PERF_MGMT:
@@ -44,5 +44,6 @@ int ocrdma_process_mad(struct ib_device *,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
#endif /* __OCRDMA_AH_H__ */
@@ -29,6 +29,7 @@
#define __OCRDMA_STATS_H__
#include <linux/debugfs.h>
+#include <rdma/ib_mad.h>
#include "ocrdma.h"
#include "ocrdma_hw.h"
@@ -2402,11 +2402,18 @@ bail:
*/
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
int ret;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (WARN_ON(in_mad_size != sizeof(*in_mad) ||
+ *out_mad_size != sizeof(*out_mad)))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
@@ -873,7 +873,8 @@ void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);
@@ -1452,7 +1452,7 @@ struct ib_flow {
struct ib_uobject *uobject;
};
-struct ib_mad;
+struct ib_mad_hdr;
struct ib_grh;
enum ib_process_mad_flags {
@@ -1693,8 +1693,10 @@ struct ib_device {
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad);
+ struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size);
struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
struct ib_ucontext *ucontext,
struct ib_udata *udata);
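All of the hardware drivers touched above follow the same mechanical recipe, summarized once here as a stand-alone sketch. example_process_mad() is hypothetical; the cast-and-WARN_ON guard is the pattern this patch adds to ehca, ipath, mlx4, mlx5, mthca, ocrdma and qib, so that a driver which only understands fixed 256-byte MADs fails loudly if the core ever advertises a different size.

#include <linux/bug.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>

/* Hypothetical driver hook showing the conversion pattern used throughout
 * this patch: take ib_mad_hdr plus explicit sizes, validate them, then keep
 * working on the fixed-size struct ib_mad internally. */
static int example_process_mad(struct ib_device *ibdev, int mad_flags,
                               u8 port_num, struct ib_wc *in_wc,
                               struct ib_grh *in_grh,
                               struct ib_mad_hdr *in, size_t in_mad_size,
                               struct ib_mad_hdr *out, size_t *out_mad_size)
{
        struct ib_mad *in_mad = (struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;

        if (WARN_ON(in_mad_size != sizeof(*in_mad) ||
                    *out_mad_size != sizeof(*out_mad)))
                return IB_MAD_RESULT_FAILURE;

        /* ... class-specific handling: parse in_mad, build the reply in
         * out_mad, optionally shrink *out_mad_size ... */

        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

Using WARN_ON rather than silently truncating keeps any future mismatch between the size the core advertises and the size the driver expects immediately visible.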