@@ -290,6 +290,7 @@ struct idpf_port_stats {
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
* @sw_marker_wq: workqueue for marker packets
+ * @vport_cfg_lock: Lock to protect this vport's config flow during alloc/dealloc/reset
*/
struct idpf_vport {
u16 num_txq;
@@ -334,6 +335,7 @@ struct idpf_vport {
bool link_up;
wait_queue_head_t sw_marker_wq;
+ struct mutex vport_cfg_lock;
};
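Since the lock now lives inside struct idpf_vport, its lifetime follows the vport itself: it is initialized once the vport is published in adapter->vports[] in idpf_vport_alloc() and destroyed just before idpf_vport_rel() in idpf_vport_dealloc() (see the hunks further down). A minimal sketch of that pairing, for orientation only:

	/* idpf_vport_alloc(): after adapter->vports[idx] = vport */
	mutex_init(&vport->vport_cfg_lock);

	/* idpf_vport_dealloc(): just before releasing the vport */
	mutex_destroy(&vport->vport_cfg_lock);
	idpf_vport_rel(vport);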
/**
@@ -527,7 +529,6 @@ struct idpf_vc_xn_manager;
* @req_tx_splitq: TX split or single queue model to request
* @req_rx_splitq: RX split or single queue model to request
* @vport_init_lock: Lock to protect vport init, re-init, and deinit flow
- * @vport_cfg_lock: Lock to protect the vport config flow
* @vector_lock: Lock to protect vector distribution
* @queue_lock: Lock to protect queue distribution
* @vc_buf_lock: Lock to protect virtchnl buffer
@@ -585,7 +586,6 @@ struct idpf_adapter {
bool req_rx_splitq;
struct mutex vport_init_lock;
- struct mutex vport_cfg_lock;
struct mutex vector_lock;
struct mutex queue_lock;
struct mutex vc_buf_lock;
@@ -812,23 +812,23 @@ static inline void idpf_vport_init_unlock(struct idpf_adapter *adapter)
/**
* idpf_vport_cfg_lock - Acquire the vport config lock
- * @adapter: private data struct
+ * @vport: virtual port data struct
*
* This lock should be used by non-datapath code to protect against vport
* destruction.
*/
-static inline void idpf_vport_cfg_lock(struct idpf_adapter *adapter)
+static inline void idpf_vport_cfg_lock(struct idpf_vport *vport)
{
- mutex_lock(&adapter->vport_cfg_lock);
+ mutex_lock(&vport->vport_cfg_lock);
}
/**
* idpf_vport_cfg_unlock - Release the vport config lock
- * @adapter: private data struct
+ * @vport: virtual port data struct
*/
-static inline void idpf_vport_cfg_unlock(struct idpf_adapter *adapter)
+static inline void idpf_vport_cfg_unlock(struct idpf_vport *vport)
{
- mutex_unlock(&adapter->vport_cfg_lock);
+ mutex_unlock(&vport->vport_cfg_lock);
}
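All of the call-site conversions below follow the same shape: the vport is resolved from the netdev first, and only then is that vport's config lock taken around the control-path work. A minimal sketch of the pattern (idpf_example_ndo is an illustrative name, not part of the patch):

	static int idpf_example_ndo(struct net_device *netdev)
	{
		struct idpf_vport *vport;
		int err;

		vport = idpf_netdev_to_vport(netdev);
		idpf_vport_cfg_lock(vport);

		/* non-datapath config work; the per-vport lock keeps the
		 * vport from being torn down underneath us
		 */
		err = 0;

		idpf_vport_cfg_unlock(vport);

		return err;
	}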
void idpf_statistics_task(struct work_struct *work);
@@ -14,23 +14,22 @@
static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
u32 __always_unused *rule_locs)
{
- struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
struct idpf_vport *vport;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = vport->num_rxq;
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return 0;
default:
break;
}
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return -EOPNOTSUPP;
}
@@ -86,11 +85,13 @@ static int idpf_get_rxfh(struct net_device *netdev,
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_rss_data *rss_data;
struct idpf_adapter *adapter;
+ struct idpf_vport *vport;
int err = 0;
u16 i;
adapter = np->adapter;
- idpf_vport_cfg_lock(adapter);
+ vport = np->vport;
+ idpf_vport_cfg_lock(vport);
if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
err = -EOPNOTSUPP;
@@ -112,7 +113,7 @@ static int idpf_get_rxfh(struct net_device *netdev,
}
unlock_mutex:
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -137,8 +138,8 @@ static int idpf_set_rxfh(struct net_device *netdev,
int err = 0;
u16 lut;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
err = -EOPNOTSUPP;
@@ -166,7 +167,7 @@ static int idpf_set_rxfh(struct net_device *netdev,
err = idpf_config_rss(vport);
unlock_mutex:
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -219,7 +220,6 @@ static void idpf_get_channels(struct net_device *netdev,
static int idpf_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
- struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
struct idpf_vport_config *vport_config;
unsigned int num_req_tx_q;
unsigned int num_req_rx_q;
@@ -234,8 +234,8 @@ static int idpf_set_channels(struct net_device *netdev,
return -EINVAL;
}
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
idx = vport->idx;
vport_config = vport->adapter->vport_config[idx];
@@ -278,7 +278,7 @@ static int idpf_set_channels(struct net_device *netdev,
}
unlock_mutex:
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -298,11 +298,10 @@ static void idpf_get_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kring,
struct netlink_ext_ack *ext_ack)
{
- struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
struct idpf_vport *vport;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
@@ -311,7 +310,7 @@ static void idpf_get_ringparam(struct net_device *netdev,
kring->tcp_data_split = idpf_vport_get_hsplit(vport);
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
}
/**
@@ -329,15 +328,14 @@ static int idpf_set_ringparam(struct net_device *netdev,
struct kernel_ethtool_ringparam *kring,
struct netlink_ext_ack *ext_ack)
{
- struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
struct idpf_vport_user_config_data *config_data;
u32 new_rx_count, new_tx_count;
struct idpf_vport *vport;
int i, err = 0;
u16 idx;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
idx = vport->idx;
@@ -395,7 +393,7 @@ static int idpf_set_ringparam(struct net_device *netdev,
err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);
unlock_mutex:
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -870,7 +868,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
u64 *data)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
- struct idpf_adapter *adapter = np->adapter;
struct idpf_vport_config *vport_config;
struct idpf_vport *vport;
unsigned int total = 0;
@@ -878,11 +875,11 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
bool is_splitq;
u16 qtype;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
if (np->state != __IDPF_VPORT_UP) {
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return;
}
@@ -949,7 +946,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
rcu_read_unlock();
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
}
/**
@@ -1027,12 +1024,11 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
u32 q_num)
{
const struct idpf_netdev_priv *np = netdev_priv(netdev);
- struct idpf_adapter *adapter = np->adapter;
- const struct idpf_vport *vport;
+ struct idpf_vport *vport;
int err = 0;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
if (np->state != __IDPF_VPORT_UP)
goto unlock_mutex;
@@ -1051,7 +1047,7 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
VIRTCHNL2_QUEUE_TYPE_TX);
unlock_mutex:
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -1203,12 +1199,11 @@ static int idpf_set_coalesce(struct net_device *netdev,
struct netlink_ext_ack *extack)
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
- struct idpf_adapter *adapter = np->adapter;
struct idpf_vport *vport;
int i, err = 0;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
if (np->state != __IDPF_VPORT_UP)
goto unlock_mutex;
@@ -1226,7 +1221,7 @@ static int idpf_set_coalesce(struct net_device *netdev,
}
unlock_mutex:
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -1242,23 +1237,22 @@ static int idpf_set_coalesce(struct net_device *netdev,
static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
struct ethtool_coalesce *ec)
{
- struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
struct idpf_vport *vport;
int err;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
err = idpf_set_q_coalesce(vport, ec, q_num, false);
if (err) {
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
err = idpf_set_q_coalesce(vport, ec, q_num, true);
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -910,12 +910,12 @@ static int idpf_stop(struct net_device *netdev)
if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
return 0;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
idpf_vport_stop(vport);
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return 0;
}
@@ -1001,9 +1001,9 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
idpf_deinit_mac_addr(vport);
- idpf_vport_cfg_lock(adapter);
+ idpf_vport_cfg_lock(vport);
idpf_vport_stop(vport);
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
idpf_decfg_netdev(vport);
@@ -1016,6 +1016,7 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
np->vport = NULL;
}
+ mutex_destroy(&vport->vport_cfg_lock);
idpf_vport_rel(vport);
adapter->vports[i] = NULL;
@@ -1156,6 +1157,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
adapter->vports[idx] = vport;
adapter->vport_ids[idx] = idpf_get_vport_id(vport);
+ mutex_init(&vport->vport_cfg_lock);
adapter->num_alloc_vports++;
/* prepare adapter->next_vport for next use */
adapter->next_vport = idpf_get_free_slot(adapter);
@@ -1526,9 +1528,9 @@ void idpf_init_task(struct work_struct *work)
np = netdev_priv(vport->netdev);
np->state = __IDPF_VPORT_DOWN;
if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags)) {
- idpf_vport_cfg_lock(adapter);
+ idpf_vport_cfg_lock(vport);
idpf_vport_open(vport);
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
}
/* Spawn and return 'idpf_init_task' work queue until all the
@@ -2131,8 +2133,8 @@ static int idpf_set_features(struct net_device *netdev,
struct idpf_vport *vport;
int err = 0;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
if (idpf_is_reset_in_prog(adapter)) {
dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n");
@@ -2160,7 +2162,7 @@ static int idpf_set_features(struct net_device *netdev,
}
unlock_mutex:
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -2186,12 +2188,12 @@ static int idpf_open(struct net_device *netdev)
if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
return 0;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
err = idpf_vport_open(vport);
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -2205,18 +2207,17 @@ static int idpf_open(struct net_device *netdev)
*/
static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
struct idpf_vport *vport;
int err;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
WRITE_ONCE(netdev->mtu, new_mtu);
err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -2298,8 +2299,8 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
struct idpf_vport *vport;
int err = 0;
- idpf_vport_cfg_lock(adapter);
vport = idpf_netdev_to_vport(netdev);
+ idpf_vport_cfg_lock(vport);
if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_MACFILTER)) {
@@ -2332,7 +2333,7 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
eth_hw_addr_set(netdev, addr->sa_data);
unlock_mutex:
- idpf_vport_cfg_unlock(adapter);
+ idpf_vport_cfg_unlock(vport);
return err;
}
@@ -80,7 +80,6 @@ static void idpf_remove(struct pci_dev *pdev)
adapter->vcxn_mngr = NULL;
mutex_destroy(&adapter->vport_init_lock);
- mutex_destroy(&adapter->vport_cfg_lock);
mutex_destroy(&adapter->vector_lock);
mutex_destroy(&adapter->queue_lock);
mutex_destroy(&adapter->vc_buf_lock);
@@ -145,7 +144,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->req_rx_splitq = true;
mutex_init(&adapter->vport_init_lock);
- mutex_init(&adapter->vport_cfg_lock);
mutex_init(&adapter->vector_lock);
mutex_init(&adapter->queue_lock);
mutex_init(&adapter->vc_buf_lock);