@@ -13,6 +13,7 @@
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/kthread.h>
#include "mei_dev.h"
#include "hw-me.h"
@@ -66,13 +67,27 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
dev_set_drvdata(&aux_dev->dev, dev);
- ret = devm_request_threaded_irq(device, hw->irq,
- mei_me_irq_quick_handler,
- mei_me_irq_thread_handler,
- IRQF_ONESHOT, KBUILD_MODNAME, dev);
- if (ret) {
- dev_err(device, "irq register failed %d\n", ret);
- goto err;
+ /* use polling */
+ if (mei_me_hw_use_polling(hw)) {
+ mei_disable_interrupts(dev);
+ init_waitqueue_head(&hw->wait_active);
+ hw->is_active = true; /* start in active mode for initialization */
+ hw->polling_thread = kthread_run(mei_me_polling_thread, dev,
+ "kmegscirqd/%s", dev_name(device));
+ if (IS_ERR(hw->polling_thread)) {
+ ret = PTR_ERR(hw->polling_thread);
+ dev_err(device, "unable to create kernel thread: %d\n", ret);
+ goto err;
+ }
+ } else {
+ ret = devm_request_threaded_irq(device, hw->irq,
+ mei_me_irq_quick_handler,
+ mei_me_irq_thread_handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev);
+ if (ret) {
+ dev_err(device, "irq register failed %d\n", ret);
+ goto err;
+ }
}
pm_runtime_get_noresume(device);
@@ -108,6 +123,7 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
static void mei_gsc_remove(struct auxiliary_device *aux_dev)
{
struct mei_device *dev;
+ struct mei_me_hw *hw;
dev = dev_get_drvdata(&aux_dev->dev);
if (!dev)
@@ -115,6 +131,10 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev)
mei_stop(dev);
+ hw = to_me_hw(dev);
+ if (mei_me_hw_use_polling(hw))
+ kthread_stop(hw->polling_thread);
+
mei_deregister(dev);
pm_runtime_disable(&aux_dev->dev);
@@ -178,6 +198,9 @@ static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device)
if (mei_write_is_idle(dev)) {
hw = to_me_hw(dev);
hw->pg_state = MEI_PG_ON;
+
+ if (mei_me_hw_use_polling(hw))
+ hw->is_active = false;
ret = 0;
} else {
ret = -EAGAIN;
@@ -202,6 +225,11 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device)
hw = to_me_hw(dev);
hw->pg_state = MEI_PG_OFF;
+ if (mei_me_hw_use_polling(hw)) {
+ hw->is_active = true;
+ wake_up(&hw->wait_active);
+ }
+
mutex_unlock(&dev->device_lock);
irq_ret = mei_me_irq_thread_handler(1, dev);
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
+#include <linux/delay.h>
#include "mei_dev.h"
#include "hbm.h"
@@ -327,9 +328,12 @@ static void mei_me_intr_clear(struct mei_device *dev)
*/
static void mei_me_intr_enable(struct mei_device *dev)
{
- u32 hcsr = mei_hcsr_read(dev);
+ u32 hcsr;
+
+ if (mei_me_hw_use_polling(to_me_hw(dev)))
+ return;
- hcsr |= H_CSR_IE_MASK;
+ hcsr = mei_hcsr_read(dev) | H_CSR_IE_MASK;
mei_hcsr_set(dev, hcsr);
}
@@ -354,6 +358,9 @@ static void mei_me_synchronize_irq(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
+ if (mei_me_hw_use_polling(hw))
+ return;
+
synchronize_irq(hw->irq);
}
@@ -380,7 +387,10 @@ static void mei_me_host_set_ready(struct mei_device *dev)
{
u32 hcsr = mei_hcsr_read(dev);
- hcsr |= H_CSR_IE_MASK | H_IG | H_RDY;
+ if (!mei_me_hw_use_polling(to_me_hw(dev)))
+ hcsr |= H_CSR_IE_MASK;
+
+ hcsr |= H_IG | H_RDY;
mei_hcsr_set(dev, hcsr);
}
@@ -1174,7 +1184,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
hcsr |= H_RST | H_IG | H_CSR_IS_MASK;
- if (!intr_enable)
+ if (!intr_enable || mei_me_hw_use_polling(to_me_hw(dev)))
hcsr &= ~H_CSR_IE_MASK;
dev->recvd_hw_ready = false;
@@ -1323,6 +1333,61 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(mei_me_irq_thread_handler);
+#define MEI_POLLING_TIMEOUT_ACTIVE 100 /* ms */
+#define MEI_POLLING_TIMEOUT_IDLE 500 /* ms */
+
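+/**
+ * mei_me_polling_thread - interrupt register polling thread
+ *
+ * @_dev: mei device
+ *
+ * The thread sleeps on the activity wait queue while the device is
+ * inactive, monitors the interrupt source register and calls
+ * mei_me_irq_thread_handler() to process the firmware input.
+ *
+ * The polling period starts at MEI_POLLING_TIMEOUT_ACTIVE milliseconds
+ * when events are pending and is stretched in MEI_POLLING_TIMEOUT_ACTIVE
+ * steps up to MEI_POLLING_TIMEOUT_IDLE while the device stays idle.
+ *
+ * Return: always 0
+ */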
+int mei_me_polling_thread(void *_dev)
+{
+ struct mei_device *dev = _dev;
+ irqreturn_t irq_ret;
+ long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
+
+ dev_dbg(dev->dev, "kernel thread is running\n");
+ while (!kthread_should_stop()) {
+ struct mei_me_hw *hw = to_me_hw(dev);
+ u32 hcsr;
+
+ wait_event_timeout(hw->wait_active,
+ hw->is_active || kthread_should_stop(),
+ msecs_to_jiffies(MEI_POLLING_TIMEOUT_IDLE));
+
+ if (kthread_should_stop())
+ break;
+
+ hcsr = mei_hcsr_read(dev);
+ if (me_intr_src(hcsr)) {
+ polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
+ irq_ret = mei_me_irq_thread_handler(1, dev);
+ if (irq_ret != IRQ_HANDLED)
+ dev_err(dev->dev, "irq_ret %d\n", irq_ret);
+ } else {
+ polling_timeout = clamp_val(polling_timeout + MEI_POLLING_TIMEOUT_ACTIVE,
+ MEI_POLLING_TIMEOUT_ACTIVE,
+ MEI_POLLING_TIMEOUT_IDLE);
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(polling_timeout));
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mei_me_polling_thread);
+
static const struct mei_hw_ops mei_me_hw_ops = {
.trc_status = mei_me_trc_status,
@@ -51,6 +51,9 @@ struct mei_cfg {
* @d0i3_supported: di03 support
* @hbuf_depth: depth of hardware host/write buffer in slots
* @read_fws: read FW status register handler
+ * @polling_thread: interrupt polling thread
+ * @wait_active: the polling thread activity wait queue
+ * @is_active: the device is active
*/
struct mei_me_hw {
const struct mei_cfg *cfg;
@@ -60,10 +63,27 @@ struct mei_me_hw {
bool d0i3_supported;
u8 hbuf_depth;
int (*read_fws)(const struct mei_device *dev, int where, u32 *val);
+ /* polling */
+ struct task_struct *polling_thread;
+ wait_queue_head_t wait_active;
+ bool is_active;
};
#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
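+/**
+ * mei_me_hw_use_polling - check whether the device works in polling mode
+ *
+ * @hw: me hw structure
+ *
+ * Return: true when no interrupt line is available (negative irq number)
+ *         and the interrupt source register has to be polled
+ */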
+static inline bool mei_me_hw_use_polling(const struct mei_me_hw *hw)
+{
+ return hw->irq < 0;
+}
+
/**
* enum mei_cfg_idx - indices to platform specific configurations.
*
@@ -127,5 +147,6 @@ int mei_me_pg_exit_sync(struct mei_device *dev);
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
+int mei_me_polling_thread(void *_dev);
#endif /* _MEI_INTERFACE_H_ */