@@ -79,11 +79,17 @@ int ath12k_core_suspend(struct ath12k_base *ab)
 		ar = ab->pdevs[i].ar;
 		if (!ar)
 			continue;
+
+		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
 		ret = ath12k_mac_wait_tx_complete(ar);
 		if (ret) {
+			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
 			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
 			return ret;
 		}
+
+		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
 	}
 
 	/* PM framework skips suspend_late/resume_early callbacks
@@ -679,7 +679,7 @@ struct ath12k {
 
 	struct work_struct regd_update_work;
 
-	struct work_struct wmi_mgmt_tx_work;
+	struct wiphy_work wmi_mgmt_tx_work;
 	struct sk_buff_head wmi_mgmt_tx_queue;
 
 	struct ath12k_wow wow;
@@ -6726,6 +6726,8 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
 {
 	int num_mgmt;
 
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
 	ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
 
 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -6787,6 +6789,8 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_link_vif *arv
 	int buf_id;
 	int ret;
 
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
 	ATH12K_SKB_CB(skb)->ar = ar;
 	spin_lock_bh(&ar->txmgmt_idr_lock);
 	buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
@@ -6841,7 +6845,7 @@ static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar)
 		ath12k_mgmt_over_wmi_tx_drop(ar, skb);
 }
 
-static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
+static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work *work)
 {
 	struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
 	struct ath12k_skb_cb *skb_cb;
@@ -6850,6 +6854,8 @@ static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
 	struct sk_buff *skb;
 	int ret;
 
+	lockdep_assert_wiphy(wiphy);
+
 	while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
 		skb_cb = ATH12K_SKB_CB(skb);
 		if (!skb_cb->vif) {
@@ -6904,7 +6910,7 @@ static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
 
 	skb_queue_tail(q, skb);
 	atomic_inc(&ar->num_pending_mgmt_tx);
-	ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
+	wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
 
 	return 0;
 }
@@ -6981,10 +6987,12 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
 
 void ath12k_mac_drain_tx(struct ath12k *ar)
 {
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
 	/* make sure rcu-protected mac80211 tx path itself is drained */
 	synchronize_net();
 
-	cancel_work_sync(&ar->wmi_mgmt_tx_work);
+	wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
 	ath12k_mgmt_over_wmi_tx_purge(ar);
 }
 
@@ -7101,6 +7109,8 @@ static void ath12k_drain_tx(struct ath12k_hw *ah)
 	struct ath12k *ar;
 	int i;
 
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
 	for_each_ar(ah, ar, i)
 		ath12k_mac_drain_tx(ar);
 }
@@ -9134,6 +9144,8 @@ static int ath12k_mac_flush(struct ath12k *ar)
 
 int ath12k_mac_wait_tx_complete(struct ath12k *ar)
 {
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
 	ath12k_mac_drain_tx(ar);
 	return ath12k_mac_flush(ar);
 }
@@ -10604,7 +10616,7 @@ static void ath12k_mac_setup(struct ath12k *ar)
 	INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
 	INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
 
-	INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+	wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
 	skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
 }