@@ -25,8 +25,6 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
-
mt76x02_mac_start(dev);
mt76x0_phy_calibrate(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
@@ -35,8 +33,6 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
- mutex_unlock(&dev->mt76.mutex);
-
return 0;
}
@@ -62,10 +58,8 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x0e_stop_hw(dev);
- mutex_unlock(&dev->mt76.mutex);
}
static void
@@ -81,8 +81,10 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev)
mt76u_queues_deinit(&dev->mt76);
}
-static void mt76x0u_mac_stop(struct mt76x02_dev *dev)
+static void mt76x0u_stop(struct ieee80211_hw *hw)
{
+ struct mt76x02_dev *dev = hw->priv;
+
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mt76.mac_work);
@@ -106,11 +108,9 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mt76.mutex);
-
ret = mt76x0_mac_start(dev);
if (ret)
- goto out;
+ return ret;
mt76x0_phy_calibrate(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
@@ -118,19 +118,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-
-out:
- mutex_unlock(&dev->mt76.mutex);
- return ret;
-}
-
-static void mt76x0u_stop(struct ieee80211_hw *hw)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- mutex_lock(&dev->mt76.mutex);
- mt76x0u_mac_stop(dev);
- mutex_unlock(&dev->mt76.mutex);
+ return 0;
}
static const struct ieee80211_ops mt76x0u_ops = {
@@ -22,15 +22,13 @@ mt76x2_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mt76.mutex);
-
ret = mt76x2_mac_start(dev);
if (ret)
- goto out;
+ return ret;
ret = mt76x2_phy_start(dev);
if (ret)
- goto out;
+ return ret;
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
@@ -38,10 +36,7 @@ mt76x2_start(struct ieee80211_hw *hw)
MT_WATCHDOG_TIME);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-
-out:
- mutex_unlock(&dev->mt76.mutex);
- return ret;
+ return 0;
}
static void
@@ -49,10 +44,8 @@ mt76x2_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x2_stop_hardware(dev);
- mutex_unlock(&dev->mt76.mutex);
}
static int
@@ -21,30 +21,24 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mt76.mutex);
-
ret = mt76x2u_mac_start(dev);
if (ret)
- goto out;
+ return ret;
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-out:
- mutex_unlock(&dev->mt76.mutex);
- return ret;
+ return 0;
}
static void mt76x2u_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76u_stop_tx(&dev->mt76);
mt76x2u_stop_hw(dev);
- mutex_unlock(&dev->mt76.mutex);
}
static int
mac80211 .start() and .stop() callbacks are never called concurrently
with other callbacks. The only concurrency is with the mt76 works, which
we cancel in stop() and schedule in start(). This fixes a possible
deadlock on cancel_delayed_work_sync(&dev->mt76.mac_work), as mac_work
also takes the mutex.

Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
---
 .../net/wireless/mediatek/mt76/mt76x0/pci.c  |  6 ------
 .../net/wireless/mediatek/mt76/mt76x0/usb.c  | 22 +++++-----------------
 .../wireless/mediatek/mt76/mt76x2/pci_main.c | 13 +++----------
 .../wireless/mediatek/mt76/mt76x2/usb_main.c | 10 ++--------
 4 files changed, 10 insertions(+), 41 deletions(-)
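For reviewers unfamiliar with the locking here, the sketch below condenses
the pattern the patch removes. It is illustrative only: the demo_* names are
made up and the bodies are reduced to the locking calls, not the actual mt76
code. It shows why holding dev->mt76.mutex across
cancel_delayed_work_sync() can deadlock when the work item also takes the
same mutex, and why dropping the mutex from .start()/.stop() is safe given
that mac80211 already serializes these callbacks.

/*
 * Simplified sketch (hypothetical demo_* names) of the deadlock this
 * patch avoids.  Only the locking pattern matters here.
 */
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_dev {
	struct mutex mutex;
	struct delayed_work mac_work;	/* INIT_DELAYED_WORK(..., demo_mac_work) */
};

/* Periodic work item: takes the same mutex the old callbacks held. */
static void demo_mac_work(struct work_struct *work)
{
	struct demo_dev *dev = container_of(work, struct demo_dev,
					    mac_work.work);

	mutex_lock(&dev->mutex);
	/* ... periodic MAC maintenance ... */
	mutex_unlock(&dev->mutex);
}

/*
 * Old pattern: .stop() held the mutex while waiting for mac_work.  If
 * mac_work was already running and blocked in mutex_lock() above,
 * cancel_delayed_work_sync() could never return -> deadlock.
 */
static void demo_stop_old(struct demo_dev *dev)
{
	mutex_lock(&dev->mutex);
	cancel_delayed_work_sync(&dev->mac_work);
	mutex_unlock(&dev->mutex);
}

/*
 * New pattern: no mutex in .stop().  mac80211 already serializes
 * .start()/.stop() against the other callbacks, so only the work item
 * needs synchronizing, which cancel_delayed_work_sync() provides.
 */
static void demo_stop_new(struct demo_dev *dev)
{
	cancel_delayed_work_sync(&dev->mac_work);
}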