@@ -736,6 +736,8 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
u8 port_num;
struct ib_wc mad_wc;
struct ib_send_wr *send_wr = &mad_send_wr->send_wr;
+ size_t in_mad_size = sizeof(struct ib_mad);
+ size_t out_mad_size = sizeof(struct ib_mad);
if (device->node_type == RDMA_NODE_IB_SWITCH &&
smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
@@ -786,8 +788,9 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
/* No GRH for DR SMP */
ret = device->process_mad(device, 0, port_num, &mad_wc, NULL,
- (struct ib_mad *)smp,
- (struct ib_mad *)&mad_priv->mad);
+ (struct ib_mad_hdr *)smp, in_mad_size,
+ (struct ib_mad_hdr *)&mad_priv->mad,
+ &out_mad_size);
switch (ret)
{
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
@@ -2038,11 +2041,14 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
local:
/* Give driver "right of first refusal" on incoming MAD */
if (port_priv->device->process_mad) {
+ size_t resp_mad_size = sizeof(struct ib_mad);
ret = port_priv->device->process_mad(port_priv->device, 0,
port_priv->port_num,
wc, &recv->grh,
- &recv->mad.mad,
- &response->mad.mad);
+ (struct ib_mad_hdr *)&recv->mad.mad,
+ sizeof(struct ib_mad),
+ (struct ib_mad_hdr *)&response->mad.mad,
+ &resp_mad_size);
if (ret & IB_MAD_RESULT_SUCCESS) {
if (ret & IB_MAD_RESULT_CONSUMED)
goto out;
@@ -347,7 +347,11 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
in_mad->data[41] = p->port_num; /* PortSelect field */
+ size_t out_mad_size = sizeof(*out_mad);
+
if ((p->ibdev->process_mad(p->ibdev, IB_MAD_IGNORE_MKEY,
- p->port_num, NULL, NULL, in_mad, out_mad) &
+ p->port_num, NULL, NULL,
+ (struct ib_mad_hdr *)in_mad, sizeof(*in_mad),
+ (struct ib_mad_hdr *)out_mad, &out_mad_size) &
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
(IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
ret = -EINVAL;
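
For reference, both core call sites above follow the same calling convention: the request size is passed by value, and the response size by pointer so the driver can validate it (and, presumably, adjust it once larger MAD formats are supported). Below is a minimal caller-side sketch of that convention, assuming a hypothetical helper name and pre-allocated 256-byte MAD buffers; nothing in it is part of the patch itself.

#include <rdma/ib_mad.h>

/* Hypothetical caller-side helper, mirroring show_pma_counter() above. */
static int example_query_pma(struct ib_device *ibdev, u8 port_num,
			     struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	size_t out_mad_size = sizeof(*out_mad);	/* space available for the reply */
	int ret;

	if (!ibdev->process_mad)
		return -ENOSYS;

	ret = ibdev->process_mad(ibdev, IB_MAD_IGNORE_MKEY, port_num,
				 NULL, NULL,
				 (struct ib_mad_hdr *)in_mad, sizeof(*in_mad),
				 (struct ib_mad_hdr *)out_mad, &out_mad_size);

	if ((ret & (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
	    (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY))
		return -EINVAL;	/* driver did not produce a usable reply */

	return 0;
}
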
@@ -584,7 +584,10 @@ static int c2_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size)
{
pr_debug("%s:%u\n", __func__, __LINE__);
return -ENOSYS;
@@ -87,7 +87,10 @@ static int iwch_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size)
{
return -ENOSYS;
}
@@ -81,8 +81,11 @@ static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
u8 port_num, struct ib_wc *in_wc,
- struct ib_grh *in_grh, struct ib_mad *in_mad,
- struct ib_mad *out_mad)
+ struct ib_grh *in_grh,
+ struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size)
{
return -ENOSYS;
}
@@ -218,9 +218,15 @@ perf_reply:
int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
int ret;
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (in_mad_size != sizeof(*in_mad) || *out_mad_size != sizeof(*out_mad))
+ return IB_MAD_RESULT_FAILURE;
if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
return IB_MAD_RESULT_FAILURE;
@@ -1491,9 +1491,15 @@ bail:
*/
int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
int ret;
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (in_mad_size != sizeof(*in_mad) || *out_mad_size != sizeof(*out_mad))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
@@ -703,7 +703,8 @@ int ipath_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
/*
* Compare the lower 24 bits of the two values.
@@ -856,8 +856,15 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (in_mad_size != sizeof(*in_mad) || *out_mad_size != sizeof(*out_mad))
+ return IB_MAD_RESULT_FAILURE;
+
switch (rdma_port_get_link_layer(ibdev, port_num)) {
case IB_LINK_LAYER_INFINIBAND:
return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
@@ -690,7 +690,8 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
void *in_mad, void *response_mad);
int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
int mlx4_ib_mad_init(struct mlx4_ib_dev *dev);
void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev);
@@ -59,10 +59,16 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
u16 slid;
int err;
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (in_mad_size != sizeof(*in_mad) || *out_mad_size != sizeof(*out_mad))
+ return IB_MAD_RESULT_FAILURE;
slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
@@ -578,8 +578,8 @@ int mthca_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
int mthca_create_agents(struct mthca_dev *dev);
void mthca_free_agents(struct mthca_dev *dev);
@@ -198,13 +198,18 @@ int mthca_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
int err;
u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (in_mad_size != sizeof(*in_mad) || *out_mad_size != sizeof(*out_mad))
+ return IB_MAD_RESULT_FAILURE;
/* Forward locally generated traps to the SM */
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
@@ -3223,7 +3223,8 @@ static int nes_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
*/
static int nes_process_mad(struct ib_device *ibdev, int mad_flags,
u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
nes_debug(NES_DBG_INIT, "\n");
return -ENOSYS;
@@ -192,7 +192,8 @@ int ocrdma_process_mad(struct ib_device *ibdev,
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
return IB_MAD_RESULT_SUCCESS;
}
@@ -2402,11 +2402,17 @@ bail:
*/
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad)
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size)
{
int ret;
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ib_mad *in_mad = (struct ib_mad *)in;
+ struct ib_mad *out_mad = (struct ib_mad *)out;
+
+ if (in_mad_size != sizeof(*in_mad) || *out_mad_size != sizeof(*out_mad))
+ return IB_MAD_RESULT_FAILURE;
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
@@ -873,7 +873,8 @@ void qib_sys_guid_chg(struct qib_ibport *ibp);
void qib_node_desc_chg(struct qib_ibport *ibp);
int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
- struct ib_mad *in_mad, struct ib_mad *out_mad);
+ struct ib_mad_hdr *in, size_t in_mad_size,
+ struct ib_mad_hdr *out, size_t *out_mad_size);
int qib_create_agents(struct qib_ibdev *dev);
void qib_free_agents(struct qib_ibdev *dev);
@@ -1363,7 +1363,7 @@ struct ib_flow {
struct ib_uobject *uobject;
};
-struct ib_mad;
+struct ib_mad_hdr;
struct ib_grh;
enum ib_process_mad_flags {
@@ -1595,8 +1595,10 @@ struct ib_device {
u8 port_num,
struct ib_wc *in_wc,
struct ib_grh *in_grh,
- struct ib_mad *in_mad,
- struct ib_mad *out_mad);
+ struct ib_mad_hdr *in_mad,
+ size_t in_mad_size,
+ struct ib_mad_hdr *out_mad,
+ size_t *out_mad_size);
struct ib_xrcd * (*alloc_xrcd)(struct ib_device *device,
struct ib_ucontext *ucontext,
struct ib_udata *udata);
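
Taken together with the ib_device method-table change above, the per-driver conversions all reduce to the same shape: cast the header pointers back to struct ib_mad, refuse anything that is not the classic 256-byte MAD, and otherwise keep the existing logic. A minimal sketch of a converted handler follows, using a hypothetical driver prefix "foo" that is not part of this patch.

#include <rdma/ib_mad.h>

/* Hypothetical handler showing the conversion pattern used above. */
static int foo_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			   struct ib_wc *in_wc, struct ib_grh *in_grh,
			   struct ib_mad_hdr *in, size_t in_mad_size,
			   struct ib_mad_hdr *out, size_t *out_mad_size)
{
	struct ib_mad *in_mad = (struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	/* Only the classic 256-byte IB MAD is handled here. */
	if (in_mad_size != sizeof(*in_mad) || *out_mad_size != sizeof(*out_mad))
		return IB_MAD_RESULT_FAILURE;

	/* ... dispatch on in_mad->mad_hdr.mgmt_class exactly as before ... */

	*out_mad = *in_mad;			/* placeholder reply for illustration */
	*out_mad_size = sizeof(*out_mad);	/* size actually written back */
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

The pointer-valued out_mad_size is presumably what would let a driver that handles larger MAD formats report the reply length it actually produced, rather than the fixed sizeof(struct ib_mad) assumed throughout this patch.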