@@ -34,7 +34,6 @@
#define EF100_MAX_VIS 4096
#define EF100_NUM_MCDI_BUFFERS 1
-#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
#define EF100_RESET_PORT ((ETH_RESET_MAC | ETH_RESET_PHY) << ETH_RESET_SHARED_SHIFT)
@@ -228,10 +228,19 @@ static int vdpa_allocate_vis(struct efx_nic *efx, unsigned int *allocated_vis)
static void ef100_vdpa_delete(struct efx_nic *efx)
{
struct vdpa_device *vdpa_dev;
+ int rc;
if (efx->vdpa_nic) {
vdpa_dev = &efx->vdpa_nic->vdpa_dev;
ef100_vdpa_reset(vdpa_dev);
+ if (efx->mcdi_buf_mode == EFX_BUF_MODE_VDPA) {
+ rc = ef100_vdpa_map_mcdi_buffer(efx);
+ if (rc) {
+ pci_err(efx->pci_dev,
+ "map_mcdi_buffer failed, err: %d\n",
+ rc);
+ }
+ }
/* replace with _vdpa_unregister_device later */
put_device(&vdpa_dev->dev);
@@ -281,6 +290,21 @@ static int get_net_config(struct ef100_vdpa_nic *vdpa_nic)
return 0;
}
+/* Tear down the VF's EF100-mode MCDI DMA buffer before entering vDPA mode.
+ *
+ * Under mcdi->iface_lock: save the current MCDI mode in the vdpa_nic so
+ * ef100_vdpa_map_mcdi_buffer() can restore it later, mark the buffer as
+ * being in vDPA mode, and force MCDI_MODE_FAIL so new MCDI requests are
+ * rejected while the buffer is gone.  The DMA buffer itself is freed only
+ * after the lock is dropped.
+ *
+ * NOTE(review): assumes efx->vdpa_nic is non-NULL at every call site and
+ * that no MCDI command is still in flight using nic_data->mcdi_buf when
+ * the buffer is freed -- confirm both.
+ */
+static void unmap_mcdi_buffer(struct efx_nic *efx)
+{
+	struct ef100_nic_data *nic_data = efx->nic_data;
+	struct efx_mcdi_iface *mcdi;
+
+	mcdi = efx_mcdi(efx);
+	spin_lock_bh(&mcdi->iface_lock);
+	/* Save current MCDI mode to be restored later */
+	efx->vdpa_nic->mcdi_mode = mcdi->mode;
+	efx->mcdi_buf_mode = EFX_BUF_MODE_VDPA;
+	mcdi->mode = MCDI_MODE_FAIL;
+	spin_unlock_bh(&mcdi->iface_lock);
+	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+}
+
static struct ef100_vdpa_nic *ef100_vdpa_create(struct efx_nic *efx,
const char *dev_name,
enum ef100_vdpa_class dev_type,
@@ -347,6 +371,7 @@ static struct ef100_vdpa_nic *ef100_vdpa_create(struct efx_nic *efx,
for (i = 0; i < EF100_VDPA_MAC_FILTER_NTYPES; i++)
vdpa_nic->filters[i].filter_id = EFX_INVALID_FILTER_ID;
+ unmap_mcdi_buffer(efx);
rc = get_net_config(vdpa_nic);
if (rc)
goto err_put_device;
@@ -148,6 +148,7 @@ struct ef100_vdpa_filter {
* @status: device status as per VIRTIO spec
* @features: negotiated feature bits
* @max_queue_pairs: maximum number of queue pairs supported
+ * @mcdi_mode: MCDI mode saved when the VF MCDI buffer was unmapped;
+ *	restored when the buffer is remapped
* @net_config: virtio_net_config data
* @vring: vring information of the vDPA device.
* @mac_address: mac address of interface associated with this vdpa device
@@ -167,6 +168,7 @@ struct ef100_vdpa_nic {
u8 status;
u64 features;
u32 max_queue_pairs;
+ enum efx_mcdi_mode mcdi_mode;
struct virtio_net_config net_config;
struct ef100_vdpa_vring_info vring[EF100_VDPA_MAX_QUEUES_PAIRS * 2];
u8 *mac_address;
@@ -186,6 +188,7 @@ int ef100_vdpa_add_filter(struct ef100_vdpa_nic *vdpa_nic,
int ef100_vdpa_init_vring(struct ef100_vdpa_nic *vdpa_nic, u16 idx);
void ef100_vdpa_irq_vectors_free(void *data);
int ef100_vdpa_reset(struct vdpa_device *vdev);
+int ef100_vdpa_map_mcdi_buffer(struct efx_nic *efx);
static inline bool efx_vdpa_is_little_endian(struct ef100_vdpa_nic *vdpa_nic)
{
@@ -711,12 +711,47 @@ static int ef100_vdpa_suspend(struct vdpa_device *vdev)
mutex_unlock(&vdpa_nic->lock);
return rc;
}
+
+/**
+ * ef100_vdpa_map_mcdi_buffer() - reallocate the VF MCDI buffer for EF100 mode
+ * @efx: NIC whose EF100-mode MCDI DMA buffer is to be restored
+ *
+ * Counterpart of unmap_mcdi_buffer(): reallocates nic_data->mcdi_buf and,
+ * under mcdi->iface_lock, restores the MCDI mode that was saved at unmap
+ * time and marks the buffer as back in EF100 mode.
+ *
+ * NOTE(review): reads efx->vdpa_nic->mcdi_mode -- assumes efx->vdpa_nic is
+ * still valid at every call site; confirm against the teardown paths.
+ *
+ * Return: 0 on success, or the negative error from efx_nic_alloc_buffer(),
+ * in which case MCDI stays in MCDI_MODE_FAIL / EFX_BUF_MODE_VDPA.
+ */
+int ef100_vdpa_map_mcdi_buffer(struct efx_nic *efx)
+{
+	struct ef100_nic_data *nic_data = efx->nic_data;
+	struct efx_mcdi_iface *mcdi;
+	int rc;
+
+	/* Update VF's MCDI buffer when switching out of vdpa mode */
+	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
+				  MCDI_BUF_LEN, GFP_KERNEL);
+	if (rc)
+		return rc;
+
+	mcdi = efx_mcdi(efx);
+	spin_lock_bh(&mcdi->iface_lock);
+	mcdi->mode = efx->vdpa_nic->mcdi_mode;
+	efx->mcdi_buf_mode = EFX_BUF_MODE_EF100;
+	spin_unlock_bh(&mcdi->iface_lock);
+
+	return 0;
+}
+
static void ef100_vdpa_free(struct vdpa_device *vdev)
{
struct ef100_vdpa_nic *vdpa_nic = get_vdpa_nic(vdev);
+ int rc;
int i;
if (vdpa_nic) {
+ if (vdpa_nic->efx->mcdi_buf_mode == EFX_BUF_MODE_VDPA) {
+ /* This will only be called via call to put_device()
+ * on vdpa device creation failure
+ */
+ rc = ef100_vdpa_map_mcdi_buffer(vdpa_nic->efx);
+ if (rc) {
+ dev_err(&vdev->dev,
+ "map_mcdi_buffer failed, err: %d\n",
+ rc);
+ }
+ }
+
for (i = 0; i < (vdpa_nic->max_queue_pairs * 2); i++) {
reset_vring(vdpa_nic, i);
if (vdpa_nic->vring[i].vring_ctx)
@@ -8,6 +8,9 @@
#ifndef EFX_MCDI_H
#define EFX_MCDI_H
+#include "mcdi_pcol.h"
+
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
/**
* enum efx_mcdi_state - MCDI request handling state
@@ -848,6 +848,16 @@ enum efx_xdp_tx_queues_mode {
struct efx_mae;
+/**
+ * enum efx_buf_alloc_mode - buffer allocation mode
+ * @EFX_BUF_MODE_EF100: buffer setup in ef100 mode
+ * @EFX_BUF_MODE_VDPA: buffer setup in vdpa mode
+ */
+enum efx_buf_alloc_mode {
+ EFX_BUF_MODE_EF100,
+ EFX_BUF_MODE_VDPA
+};
+
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@@ -877,6 +887,7 @@ struct efx_mae;
* @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
* @irq_rx_moderation_us: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags
+ * @mcdi_buf_mode: mcdi buffer allocation mode
* @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
* @reset_pending: Bitmask for pending resets
* @tx_queue: TX DMA queues
@@ -1045,6 +1056,7 @@ struct efx_nic {
u32 msg_enable;
enum nic_state state;
+ enum efx_buf_alloc_mode mcdi_buf_mode;
unsigned long reset_pending;
struct efx_channel *channel[EFX_MAX_CHANNELS];
To avoid a clash between the IOVA range of the VF's MCDI DMA buffer and the
guest buffer IOVAs, unmap the MCDI buffer when switching to vDPA mode and use
the PF's IOMMU domain for running the VF's MCDI commands.

Signed-off-by: Gautam Dawar <gautam.dawar@amd.com>
---
 drivers/net/ethernet/sfc/ef100_nic.c      |  1 -
 drivers/net/ethernet/sfc/ef100_vdpa.c     | 25 ++++++++++++++++
 drivers/net/ethernet/sfc/ef100_vdpa.h     |  3 ++
 drivers/net/ethernet/sfc/ef100_vdpa_ops.c | 35 +++++++++++++++++++++++
 drivers/net/ethernet/sfc/mcdi.h           |  3 ++
 drivers/net/ethernet/sfc/net_driver.h     | 12 ++++++++
 6 files changed, 78 insertions(+), 1 deletion(-)