
[v2,11/14] net: wwan: t7xx: Runtime PM

Message ID: 20211101035635.26999-12-ricardo.martinez@linux.intel.com (mailing list archive)
State: Changes Requested
Delegated to: Netdev Maintainers
Series: net: wwan: t7xx: PCIe driver for MediaTek M.2 modem

Checks

Context Check Description
netdev/cover_letter success Series has a cover letter
netdev/fixes_present success Fixes tag not required for -next series
netdev/patch_count success Link
netdev/tree_selection success Guessed tree name to be net-next
netdev/subject_prefix success Link
netdev/cc_maintainers warning 3 maintainers not CCed: linux-arm-kernel@lists.infradead.org linux-mediatek@lists.infradead.org matthias.bgg@gmail.com
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success No Fixes tag
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 223 lines checked
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/header_inline success No static functions without inline keyword in header files

Commit Message

Ricardo Martinez Nov. 1, 2021, 3:56 a.m. UTC
From: Haijun Liu <haijun.liu@mediatek.com>

Enable runtime power management callbacks, including runtime_suspend
and runtime_resume. Autosuspend is used to avoid the overhead of
frequent wake-ups.
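
For reference, below is a minimal sketch of the get/mark-busy/put-autosuspend
pattern this patch adds around the deferred work in the CLDMA and DPMAIF
paths; "struct my_dev" and "my_dev_work" are hypothetical names used only for
illustration, not identifiers from the driver.

#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

struct my_dev {
	struct device *dev;
	struct work_struct work;
};

static void my_dev_work(struct work_struct *work)
{
	struct my_dev *md = container_of(work, struct my_dev, work);
	int ret;

	/*
	 * Take a runtime PM reference and resume the device before
	 * touching hardware; -EACCES means runtime PM is disabled for
	 * this device, in which case the hardware is assumed active.
	 */
	ret = pm_runtime_resume_and_get(md->dev);
	if (ret < 0 && ret != -EACCES)
		return;

	/* ... process completed descriptors here ... */

	/* Update the last-busy timestamp and drop the reference,
	 * letting the device autosuspend after the configured delay.
	 */
	pm_runtime_mark_last_busy(md->dev);
	pm_runtime_put_autosuspend(md->dev);
}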

Signed-off-by: Haijun Liu <haijun.liu@mediatek.com>
Signed-off-by: Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
Co-developed-by: Eliot Lee <eliot.lee@intel.com>
Signed-off-by: Eliot Lee <eliot.lee@intel.com>
Signed-off-by: Ricardo Martinez <ricardo.martinez@linux.intel.com>
---
 drivers/net/wwan/t7xx/t7xx_hif_cldma.c     | 13 +++++++++++++
 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c | 16 ++++++++++++++++
 drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c | 15 +++++++++++++++
 drivers/net/wwan/t7xx/t7xx_pci.c           | 21 +++++++++++++++++++++
 4 files changed, 65 insertions(+)
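
For context, the probe-side wiring added in the t7xx_pci.c hunk follows the
usual PCI runtime PM setup, sketched below under assumed names (my_*); only
the pm_runtime_* calls and the 20000 ms delay mirror the patch, everything
else is illustrative.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#define MY_AUTOSUSPEND_DELAY_MS	20000	/* same value as PM_AUTOSUSPEND_MS */

static int my_runtime_suspend(struct device *dev)
{
	/* Quiesce DMA and ask the device to enter its low-power state. */
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	/* Wake the device and restore interrupt masks. */
	return 0;
}

static const struct dev_pm_ops my_pm_ops = {
	.runtime_suspend = my_runtime_suspend,
	.runtime_resume = my_runtime_resume,
};

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* ... hardware and host-interface init ... */

	/*
	 * Only autosuspend after a period of inactivity, so short idle
	 * gaps in traffic do not cause constant suspend/resume cycles.
	 */
	pm_runtime_set_autosuspend_delay(&pdev->dev, MY_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	/*
	 * The PCI core takes a runtime PM reference before probe and keeps
	 * it on success; dropping it once device bring-up is complete (as
	 * mtk_pci_pm_init_late() does in the patch) lets the device
	 * runtime-suspend when idle.
	 */
	pm_runtime_put_noidle(&pdev->dev);
	return 0;
}

static struct pci_driver my_pci_driver = {
	.name = "my_wwan",
	.probe = my_probe,
	.driver.pm = &my_pm_ops,
};
module_pci_driver(my_pci_driver);
MODULE_LICENSE("GPL");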

Patch

diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index bcee31a5af12..18c1fcccd9dc 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -22,6 +22,7 @@ 
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/pm_runtime.h>
 #include <linux/skbuff.h>
 
 #include "t7xx_cldma.h"
@@ -310,6 +311,8 @@  static void cldma_rx_done(struct work_struct *work)
 	/* enable RX_DONE && EMPTY interrupt */
 	cldma_hw_dismask_txrxirq(&md_ctrl->hw_info, queue->index, true);
 	cldma_hw_dismask_eqirq(&md_ctrl->hw_info, queue->index, true);
+	pm_runtime_mark_last_busy(md_ctrl->dev);
+	pm_runtime_put_autosuspend(md_ctrl->dev);
 }
 
 static int cldma_gpd_tx_collect(struct cldma_queue *queue)
@@ -451,6 +454,8 @@  static void cldma_tx_done(struct work_struct *work)
 	}
 
 	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
+	pm_runtime_mark_last_busy(md_ctrl->dev);
+	pm_runtime_put_autosuspend(md_ctrl->dev);
 }
 
 static void cldma_ring_free(struct cldma_ctrl *md_ctrl,
@@ -674,6 +679,7 @@  static void cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
 		if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
 			for (i = 0; i < CLDMA_TXQ_NUM; i++) {
 				if (l2_tx_int & BIT(i)) {
+					pm_runtime_get(md_ctrl->dev);
 					/* disable TX_DONE interrupt */
 					cldma_hw_mask_eqirq(hw_info, i, false);
 					cldma_hw_mask_txrxirq(hw_info, i, false);
@@ -702,6 +708,7 @@  static void cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
 		if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
 			for (i = 0; i < CLDMA_RXQ_NUM; i++) {
 				if (l2_rx_int & (BIT(i) | EQ_STA_BIT(i))) {
+					pm_runtime_get(md_ctrl->dev);
 					/* disable RX_DONE and QUEUE_EMPTY interrupt */
 					cldma_hw_mask_eqirq(hw_info, i, true);
 					cldma_hw_mask_txrxirq(hw_info, i, true);
@@ -1133,8 +1140,12 @@  int cldma_send_skb(enum cldma_id hif_id, int qno, struct sk_buff *skb, bool skb_
 	struct cldma_queue *queue;
 	unsigned long flags;
 	int ret = 0;
+	int val;
 
 	md_ctrl = md_cd_get(hif_id);
+	val = pm_runtime_resume_and_get(md_ctrl->dev);
+	if (val < 0 && val != -EACCES)
+		return val;
 
 	if (qno >= CLDMA_TXQ_NUM) {
 		ret = -EINVAL;
@@ -1199,6 +1210,8 @@  int cldma_send_skb(enum cldma_id hif_id, int qno, struct sk_buff *skb, bool skb_
 	} while (!ret);
 
 exit:
+	pm_runtime_mark_last_busy(md_ctrl->dev);
+	pm_runtime_put_autosuspend(md_ctrl->dev);
 	return ret;
 }
 
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index e4af05441707..ae38fb29ec81 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -22,6 +22,7 @@ 
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/mm.h>
+#include <linux/pm_runtime.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -1039,6 +1040,7 @@  static void dpmaif_rxq_work(struct work_struct *work)
 {
 	struct dpmaif_ctrl *dpmaif_ctrl;
 	struct dpmaif_rx_queue *rxq;
+	int ret;
 
 	rxq = container_of(work, struct dpmaif_rx_queue, dpmaif_rxq_work);
 	dpmaif_ctrl = rxq->dpmaif_ctrl;
@@ -1053,8 +1055,14 @@  static void dpmaif_rxq_work(struct work_struct *work)
 		return;
 	}
 
+	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+	if (ret < 0 && ret != -EACCES)
+		return;
+
 	dpmaif_do_rx(dpmaif_ctrl, rxq);
 
+	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
 	atomic_set(&rxq->rx_processing, 0);
 }
 
@@ -1417,9 +1425,14 @@  static void dpmaif_bat_release_work(struct work_struct *work)
 {
 	struct dpmaif_ctrl *dpmaif_ctrl;
 	struct dpmaif_rx_queue *rxq;
+	int ret;
 
 	dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
 
+	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+	if (ret < 0 && ret != -EACCES)
+		return;
+
 	/* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */
 	rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
 
@@ -1427,6 +1440,9 @@  static void dpmaif_bat_release_work(struct work_struct *work)
 	dpmaif_dl_pkt_bat_release_and_add(rxq);
 	/* frag BAT release and add */
 	dpmaif_dl_frag_bat_release_and_add(rxq);
+
+	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
 }
 
 int dpmaif_bat_release_work_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
index 3ae87761af05..84fc980824e5 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
@@ -18,6 +18,7 @@ 
 #include <linux/kernel.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
+#include <linux/pm_runtime.h>
 #include <linux/spinlock.h>
 
 #include "t7xx_common.h"
@@ -167,6 +168,10 @@  static void dpmaif_tx_done(struct work_struct *work)
 	txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
 	dpmaif_ctrl = txq->dpmaif_ctrl;
 
+	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+	if (ret < 0 && ret != -EACCES)
+		return;
+
 	ret = dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
 	if (ret == -EAGAIN ||
 	    (dpmaif_hw_check_clr_ul_done_status(&dpmaif_ctrl->hif_hw_info, txq->index) &&
@@ -179,6 +184,9 @@  static void dpmaif_tx_done(struct work_struct *work)
 		dpmaif_clr_ip_busy_sts(&dpmaif_ctrl->hif_hw_info);
 		dpmaif_unmask_ulq_interrupt(dpmaif_ctrl, txq->index);
 	}
+
+	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
 }
 
 static void set_drb_msg(struct dpmaif_ctrl *dpmaif_ctrl,
@@ -513,6 +521,7 @@  static void do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
 static int dpmaif_tx_hw_push_thread(void *arg)
 {
 	struct dpmaif_ctrl *dpmaif_ctrl;
+	int ret;
 
 	dpmaif_ctrl = arg;
 	while (!kthread_should_stop()) {
@@ -528,7 +537,13 @@  static int dpmaif_tx_hw_push_thread(void *arg)
 		if (kthread_should_stop())
 			break;
 
+		ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
+		if (ret < 0 && ret != -EACCES)
+			return ret;
+
 		do_tx_hw_push(dpmaif_ctrl);
+		pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
+		pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
 	}
 
 	return 0;
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 5afd8eb4203f..3328a225e20b 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -20,6 +20,7 @@ 
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/spinlock.h>
 
 #include "t7xx_mhccif.h"
@@ -34,6 +35,7 @@ 
 #define PCI_EREG_BASE			2
 
 #define PM_ACK_TIMEOUT_MS		1500
+#define PM_AUTOSUSPEND_MS		20000
 #define PM_RESOURCE_POLL_TIMEOUT_US	10000
 #define PM_RESOURCE_POLL_STEP_US	100
 
@@ -78,6 +80,8 @@  static int mtk_pci_pm_init(struct mtk_pci_dev *mtk_dev)
 	atomic_set(&mtk_dev->md_pm_state, MTK_PM_INIT);
 
 	iowrite32(L1_DISABLE_BIT(0), IREG_BASE(mtk_dev) + DIS_ASPM_LOWPWR_SET_0);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS);
+	pm_runtime_use_autosuspend(&pdev->dev);
 
 	return mtk_wait_pm_config(mtk_dev);
 }
@@ -92,6 +96,8 @@  void mtk_pci_pm_init_late(struct mtk_pci_dev *mtk_dev)
 			D2H_INT_RESUME_ACK_AP);
 	iowrite32(L1_DISABLE_BIT(0), IREG_BASE(mtk_dev) + DIS_ASPM_LOWPWR_CLR_0);
 	atomic_set(&mtk_dev->md_pm_state, MTK_PM_RESUMED);
+
+	pm_runtime_put_noidle(&mtk_dev->pdev->dev);
 }
 
 static int mtk_pci_pm_reinit(struct mtk_pci_dev *mtk_dev)
@@ -101,6 +107,8 @@  static int mtk_pci_pm_reinit(struct mtk_pci_dev *mtk_dev)
 	 */
 	atomic_set(&mtk_dev->md_pm_state, MTK_PM_INIT);
 
+	pm_runtime_get_noresume(&mtk_dev->pdev->dev);
+
 	iowrite32(L1_DISABLE_BIT(0), IREG_BASE(mtk_dev) + DIS_ASPM_LOWPWR_SET_0);
 	return mtk_wait_pm_config(mtk_dev);
 }
@@ -405,6 +413,7 @@  static int __mtk_pci_pm_resume(struct pci_dev *pdev, bool state_check)
 	mtk_dev->rgu_pci_irq_en = true;
 	mtk_pcie_mac_set_int(mtk_dev, SAP_RGU_INT);
 	iowrite32(L1_DISABLE_BIT(0), IREG_BASE(mtk_dev) + DIS_ASPM_LOWPWR_CLR_0);
+	pm_runtime_mark_last_busy(&pdev->dev);
 	atomic_set(&mtk_dev->md_pm_state, MTK_PM_RESUMED);
 
 	return ret;
@@ -446,6 +455,16 @@  static int mtk_pci_pm_thaw(struct device *dev)
 	return __mtk_pci_pm_resume(to_pci_dev(dev), false);
 }
 
+static int mtk_pci_pm_runtime_suspend(struct device *dev)
+{
+	return __mtk_pci_pm_suspend(to_pci_dev(dev));
+}
+
+static int mtk_pci_pm_runtime_resume(struct device *dev)
+{
+	return __mtk_pci_pm_resume(to_pci_dev(dev), true);
+}
+
 static const struct dev_pm_ops mtk_pci_pm_ops = {
 	.suspend = mtk_pci_pm_suspend,
 	.resume = mtk_pci_pm_resume,
@@ -455,6 +474,8 @@  static const struct dev_pm_ops mtk_pci_pm_ops = {
 	.poweroff = mtk_pci_pm_suspend,
 	.restore = mtk_pci_pm_resume,
 	.restore_noirq = mtk_pci_pm_resume_noirq,
+	.runtime_suspend = mtk_pci_pm_runtime_suspend,
+	.runtime_resume = mtk_pci_pm_runtime_resume
 };
 
 static int mtk_request_irq(struct pci_dev *pdev)