@@ -182,7 +182,7 @@ int qed_hw_prepare(struct qed_dev *cdev,
void qed_hw_remove(struct qed_dev *cdev);
/**
- * qed_ptt_acquire(): Allocate a PTT window.
+ * qed_ptt_acquire(): Allocate a PTT window in sleepable context.
*
* @p_hwfn: HW device data.
*
@@ -193,6 +193,16 @@ void qed_hw_remove(struct qed_dev *cdev);
*/
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
+/**
+ * _qed_ptt_acquire(): Allocate a PTT window based on the context.
+ *
+ * @p_hwfn: HW device data.
+ * @is_atomic: Hint from the caller - true if the context is atomic
+ *             (non-sleepable), false if the caller may sleep.
+ *
+ * Return: struct qed_ptt.
+ */
+struct qed_ptt *_qed_ptt_acquire(struct qed_hwfn *p_hwfn, bool is_atomic);
+
/**
* qed_ptt_release(): Release PTT Window.
*
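Usage note (editor's sketch, not part of the patch): callers that know they cannot sleep
should use _qed_ptt_acquire() with is_atomic = true, while sleepable callers keep using
qed_ptt_acquire(). The helper below is hypothetical and only illustrates the intended
pattern; qed_rd() and qed_ptt_release() are existing qed APIs.

	static int qed_read_reg_example(struct qed_hwfn *p_hwfn, u32 addr,
					u32 *val, bool is_atomic)
	{
		struct qed_ptt *p_ptt;

		/* In atomic context the PTT pool is polled with udelay();
		 * otherwise the core may usleep_range() while waiting.
		 */
		p_ptt = _qed_ptt_acquire(p_hwfn, is_atomic);
		if (!p_ptt)
			return -EBUSY;

		*val = qed_rd(p_hwfn, p_ptt, addr);
		qed_ptt_release(p_hwfn, p_ptt);

		return 0;
	}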
@@ -23,7 +23,10 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
-#define QED_BAR_ACQUIRE_TIMEOUT 1000
+#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT 1000
+#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP 1000
+#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT 100000
+#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY 10
/* Invalid values */
#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
@@ -83,13 +86,18 @@ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
p_hwfn->p_ptt_pool = NULL;
}
-struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+struct qed_ptt *_qed_ptt_acquire(struct qed_hwfn *p_hwfn, bool is_atomic)
{
struct qed_ptt *p_ptt;
- unsigned int i;
+ unsigned int i, count;
+
+ if (is_atomic)
+ count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT;
+ else
+ count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT;
/* Take the free PTT from the list */
- for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
+ for (i = 0; i < count; i++) {
spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
@@ -105,13 +113,23 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
}
spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
- usleep_range(1000, 2000);
+
+ if (is_atomic)
+ udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY);
+ else
+ usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP,
+ QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2);
}
DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
return NULL;
}
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+{
+ return _qed_ptt_acquire(p_hwfn, false);
+}
+
void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
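Worked timeout budget (inferred from the constants above, not spelled out in the patch):

	sleepable: QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT * usleep_range(1000, 2000)
	           = 1000 * 1-2 ms  ~= 1-2 s  (unchanged from the old behaviour)
	atomic:    QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT * udelay(10)
	           = 100000 * 10 us ~= 1 s    (pure busy-wait, never sleeps)

So both paths give up and print the "PTT acquire timeout" notice after roughly the
same overall wait, but the atomic path never schedules.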
@@ -1863,7 +1863,7 @@ static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
- struct qed_eth_stats *stats)
+ struct qed_eth_stats *stats, bool is_atomic)
{
u8 fw_vport = 0;
int i;
@@ -1872,7 +1872,7 @@ static void _qed_get_vport_stats(struct qed_dev *cdev,
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+ struct qed_ptt *p_ptt = IS_PF(cdev) ? _qed_ptt_acquire(p_hwfn, is_atomic)
: NULL;
bool b_get_port_stats;
@@ -1899,7 +1899,8 @@ static void _qed_get_vport_stats(struct qed_dev *cdev,
}
}
-void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats,
+ bool is_atomic)
{
u32 i;
@@ -1908,7 +1909,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
return;
}
- _qed_get_vport_stats(cdev, stats);
+ _qed_get_vport_stats(cdev, stats, is_atomic);
if (!cdev->reset_stats)
return;
@@ -1960,7 +1961,7 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
if (!cdev->reset_stats) {
DP_INFO(cdev, "Reset stats not allocated\n");
} else {
- _qed_get_vport_stats(cdev, cdev->reset_stats);
+ _qed_get_vport_stats(cdev, cdev->reset_stats, false);
cdev->reset_stats->common.link_change_count = 0;
}
}
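Note: of the callers updated in this series, qed_reset_vport_stats() above and
qed_get_protocol_stats(), qed_fill_generic_tlv_data() and qede_get_ethtool_stats()
below all pass is_atomic = false, i.e. they keep the original sleepable behaviour;
only the qede_get_stats64() path further down passes true.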
@@ -249,7 +249,8 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
-void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
+void qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats, bool is_atomic);
void qed_reset_vport_stats(struct qed_dev *cdev);
@@ -3101,7 +3101,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev,
switch (type) {
case QED_MCP_LAN_STATS:
- qed_get_vport_stats(cdev, ð_stats);
+ qed_get_vport_stats(cdev, ð_stats, false);
stats->lan_stats.ucast_rx_pkts =
eth_stats.common.rx_ucast_pkts;
stats->lan_stats.ucast_tx_pkts =
@@ -3161,7 +3161,7 @@ qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
}
}
- qed_get_vport_stats(cdev, &stats);
+ qed_get_vport_stats(cdev, &stats, false);
p_common = &stats.common;
tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
p_common->rx_bcast_pkts;
@@ -569,7 +569,7 @@ void qede_set_udp_tunnels(struct qede_dev *edev);
void qede_reload(struct qede_dev *edev,
struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
-void qede_fill_by_demand_stats(struct qede_dev *edev);
+void qede_fill_by_demand_stats(struct qede_dev *edev, bool is_atomic);
void __qede_lock(struct qede_dev *edev);
void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
@@ -408,7 +408,7 @@ static void qede_get_ethtool_stats(struct net_device *dev,
struct qede_fastpath *fp;
int i;
- qede_fill_by_demand_stats(edev);
+ qede_fill_by_demand_stats(edev, false);
/* Need to protect the access to the fastpath array */
__qede_lock(edev);
@@ -301,12 +301,12 @@ module_exit(qede_cleanup);
static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);
-void qede_fill_by_demand_stats(struct qede_dev *edev)
+void qede_fill_by_demand_stats(struct qede_dev *edev, bool is_atomic)
{
struct qede_stats_common *p_common = &edev->stats.common;
struct qed_eth_stats stats;
- edev->ops->get_vport_stats(edev->cdev, &stats);
+ edev->ops->get_vport_stats(edev->cdev, &stats, is_atomic);
p_common->no_buff_discards = stats.common.no_buff_discards;
p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
@@ -413,7 +413,7 @@ static void qede_get_stats64(struct net_device *dev,
struct qede_dev *edev = netdev_priv(dev);
struct qede_stats_common *p_common;
- qede_fill_by_demand_stats(edev);
+ qede_fill_by_demand_stats(edev, true);
p_common = &edev->stats.common;
stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
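The one caller flagged as atomic is the .ndo_get_stats64 path, which may be invoked
from contexts that cannot sleep; that motivation is inferred from the change (no
commit message is shown here). The resulting call chain, sketched from the hunks above:

	qede_get_stats64()
	  -> qede_fill_by_demand_stats(edev, true)
	    -> edev->ops->get_vport_stats(edev->cdev, &stats, true)
	      -> qed_get_vport_stats() -> _qed_get_vport_stats(cdev, stats, true)
	        -> _qed_ptt_acquire(p_hwfn, true)   /* udelay() polling, no sleep */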
@@ -319,7 +319,7 @@ struct qed_eth_ops {
struct eth_slow_path_rx_cqe *cqe);
void (*get_vport_stats)(struct qed_dev *cdev,
- struct qed_eth_stats *stats);
+ struct qed_eth_stats *stats, bool is_atomic);
int (*tunn_config)(struct qed_dev *cdev,
struct qed_tunn_params *params);
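For reference, a minimal sketch of a sleepable consumer of the updated qed_eth_ops
callback; qede_sample_stats() is a hypothetical name used only to show the new
argument, while edev->ops and edev->cdev are the existing qede fields.

	static void qede_sample_stats(struct qede_dev *edev)
	{
		struct qed_eth_stats stats;

		/* Process context: is_atomic = false lets the qed core sleep
		 * (usleep_range()) while waiting for a free PTT window.
		 */
		edev->ops->get_vport_stats(edev->cdev, &stats, false);
	}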