diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -64,7 +64,6 @@ struct nicpf {
u32 *speed;
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
- bool mbx_lock[MAX_NUM_VFS_SUPPORTED];

/* MSI-X */
u8 num_vec;
@@ -954,8 +953,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
int i;
int ret = 0;

- nic->mbx_lock[vf] = true;
-
mbx_addr = nic_get_mbx_addr(vf);
mbx_data = (u64 *)&mbx;

@@ -975,7 +972,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
nic->duplex[vf] = 0;
nic->speed[vf] = 0;
}
- goto unlock;
+ return;
case NIC_MBOX_MSG_QS_CFG:
reg_addr = NIC_PF_QSET_0_127_CFG |
(mbx.qs.num << NIC_QS_ID_SHIFT);
@@ -1044,7 +1041,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_RSS_SIZE:
nic_send_rss_size(nic, vf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_RSS_CFG:
case NIC_MBOX_MSG_RSS_CFG_CONT:
nic_config_rss(nic, &mbx.rss_cfg);
@@ -1062,19 +1059,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_ALLOC_SQS:
nic_alloc_sqs(nic, &mbx.sqs_alloc);
- goto unlock;
+ return;
case NIC_MBOX_MSG_NICVF_PTR:
nic->nicvf[vf] = mbx.nicvf.nicvf;
break;
case NIC_MBOX_MSG_PNICVF_PTR:
nic_send_pnicvf(nic, vf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_SNICVF_PTR:
nic_send_snicvf(nic, &mbx.nicvf);
- goto unlock;
+ return;
case NIC_MBOX_MSG_BGX_STATS:
nic_get_bgx_stats(nic, &mbx.bgx_stats);
- goto unlock;
+ return;
case NIC_MBOX_MSG_LOOPBACK:
ret = nic_config_loopback(nic, &mbx.lbk);
break;
@@ -1083,7 +1080,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_PFC:
nic_pause_frame(nic, vf, &mbx.pfc);
- goto unlock;
+ return;
case NIC_MBOX_MSG_PTP_CFG:
nic_config_timestamp(nic, vf, &mbx.ptp);
break;
@@ -1134,8 +1131,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
mbx.msg.msg, vf);
nic_mbx_send_nack(nic, vf);
}
-unlock:
- nic->mbx_lock[vf] = false;
}

static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
@@ -1313,18 +1308,18 @@ static void nic_poll_for_link(struct work_struct *work)
if (nic->link[vf] == link.link_up)
continue;

- if (!nic->mbx_lock[vf]) {
- nic->link[vf] = link.link_up;
- nic->duplex[vf] = link.duplex;
- nic->speed[vf] = link.speed;
+ nic->link[vf] = link.link_up;
+ nic->duplex[vf] = link.duplex;
+ nic->speed[vf] = link.speed;

- /* Send a mbox message to VF with current link status */
- mbx.link_status.link_up = link.link_up;
- mbx.link_status.duplex = link.duplex;
- mbx.link_status.speed = link.speed;
- mbx.link_status.mac_type = link.mac_type;
+ /* Send a mbox message to VF with current link status */
+ mbx.link_status.link_up = link.link_up;
+ mbx.link_status.duplex = link.duplex;
+ mbx.link_status.speed = link.speed;
+ mbx.link_status.mac_type = link.mac_type;
+
+ if (!nic_is_mbox_intr_active(nic, vf))
nic_send_msg_to_vf(nic, vf, &mbx);
- }
}
queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -128,6 +128,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
mutex_lock(&nic->rx_mode_mtx);

+ while (nicvf_check_is_intr_active(nic, NICVF_INTR_MBOX, 0))
+ msleep(1);
+
nic->pf_acked = false;
nic->pf_nacked = false;
In order to prevent mailbox data from being re-written on the VF side, we need
to check whether there is an active mailbox IRQ from the PF, and proceed with
sending a message to the PF only if there is none. Taking a spinlock in the IRQ
handler and in the message send routine won't help: by the moment the code flow
reaches the IRQ handler and acquires the spinlock, the message send routine
could already have been invoked and re-written the data in the mailbox. The
same is true for the PF while sending messages to a VF.

This commit implements a mailbox IRQ status check before sending a message
from the PF to a VF, and likewise before sending a message from a VF to the PF.

Signed-off-by: Vadim Lomovtsev <vlomovtsev@marvell.com>
---
 .../net/ethernet/cavium/thunder/nic_main.c   | 39 ++++++++-----------
 .../net/ethernet/cavium/thunder/nicvf_main.c |  3 ++
 2 files changed, 20 insertions(+), 22 deletions(-)
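The hunks above rely on two helpers whose defining hunks are not shown here:
nic_is_mbox_intr_active() on the PF side and nicvf_check_is_intr_active() on
the VF side. As a rough illustration only (not a hunk from this patch), a
PF-side check could read the mailbox interrupt pending register and test the
VF's bit, assuming the driver's existing nic_reg_read() accessor and the
NIC_PF_MAILBOX_INT register pair (one pending bit per VF, 64 VFs per 64-bit
register):

static bool nic_is_mbox_intr_active(struct nicpf *nic, int vf)
{
	/* Illustrative sketch: two 64-bit MAILBOX_INT registers cover
	 * VFs 0..63 and 64..127; a set bit means that VF's mailbox IRQ
	 * is pending, i.e. nic_handle_mbx_intr() may still be running.
	 */
	u64 intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + ((vf >> 6) << 3));

	return !!(intr & BIT_ULL(vf & 0x3f));
}

The VF-side nicvf_check_is_intr_active() would similarly test the mailbox bit
in the VF's interrupt status register; that is what lets the while/msleep(1)
loop added to nicvf_send_msg_to_pf() wait until the pending mailbox IRQ has
been serviced before writing new data into the mailbox.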