[v3,17/25] bus: mhi: ep: Add support for powering down the MHI endpoint stack

Message ID 20220212182117.49438-18-manivannan.sadhasivam@linaro.org (mailing list archive)
State Superseded
Series Add initial support for MHI endpoint stack

Commit Message

Manivannan Sadhasivam Feb. 12, 2022, 6:21 p.m. UTC
Add support for MHI endpoint power_down that includes stopping all
available channels, destroying the channels, resetting the event and
transfer rings and freeing the host cache.

The stack will be powered down whenever the physical bus link goes down.

Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
 drivers/bus/mhi/ep/main.c | 81 +++++++++++++++++++++++++++++++++++++++
 include/linux/mhi_ep.h    |  6 +++
 2 files changed, 87 insertions(+)
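
For context, a minimal sketch of how a controller driver might use the new API, based on the commit message (power down when the physical bus link goes down) and on the comment this patch adds above mhi_ep_unregister_controller(). The "foo_ep" structure and the callback names are illustrative assumptions, not part of this series.

#include <linux/mhi_ep.h>

/*
 * Hypothetical controller driver glue. Only mhi_ep_power_down() and
 * mhi_ep_unregister_controller() are MHI EP APIs from this series;
 * everything else here is made up for illustration.
 */
struct foo_ep {
	struct mhi_ep_cntrl mhi_cntrl;	/* assumed to be embedded here */
};

/* Called by the bus-specific glue when the physical bus link goes down */
static void foo_ep_link_down(struct foo_ep *foo)
{
	/* Stops channels, resets the rings and frees the host cache */
	mhi_ep_power_down(&foo->mhi_cntrl);
}

/* Driver teardown: power down the stack before unregistering it */
static void foo_ep_remove(struct foo_ep *foo)
{
	mhi_ep_power_down(&foo->mhi_cntrl);
	mhi_ep_unregister_controller(&foo->mhi_cntrl);
}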

Comments

Alex Elder Feb. 15, 2022, 10:39 p.m. UTC | #1
On 2/12/22 12:21 PM, Manivannan Sadhasivam wrote:
> Add support for MHI endpoint power_down that includes stopping all
> available channels, destroying the channels, resetting the event and
> transfer rings and freeing the host cache.
> 
> The stack will be powered down whenever the physical bus link goes down.
> 
> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>

Not much to say here either, just a few suggestions.

					-Alex

> ---
>   drivers/bus/mhi/ep/main.c | 81 +++++++++++++++++++++++++++++++++++++++
>   include/linux/mhi_ep.h    |  6 +++
>   2 files changed, 87 insertions(+)
> 
> diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
> index 016e819f640a..14cb08de4263 100644
> --- a/drivers/bus/mhi/ep/main.c
> +++ b/drivers/bus/mhi/ep/main.c
> @@ -21,6 +21,8 @@
>   
>   static DEFINE_IDA(mhi_ep_cntrl_ida);
>   
> +static int mhi_ep_destroy_device(struct device *dev, void *data);
> +
>   static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx,
>   			     struct mhi_ep_ring_element *el)
>   {
> @@ -492,6 +494,71 @@ static irqreturn_t mhi_ep_irq(int irq, void *data)
>   	return IRQ_HANDLED;
>   }
>   
> +static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
> +{
> +	struct mhi_ep_ring *ch_ring, *ev_ring;
> +	struct mhi_result result = {};
> +	struct mhi_ep_chan *mhi_chan;
> +	int i;
> +
> +	/* Stop all the channels */
> +	for (i = 0; i < mhi_cntrl->max_chan; i++) {
How about looking up the channel first and deriving the ring from it, so
the array is only indexed once?  (A fuller sketch of the restructured
loops appears after the quoted patch, below.)

		mhi_chan = &mhi_cntrl->mhi_chan[i];
		ch_ring = &mhi_chan->ring;

> +		ch_ring = &mhi_cntrl->mhi_chan[i].ring;
> +		if (!ch_ring->started)
> +			continue;
> +
> +		mhi_chan = &mhi_cntrl->mhi_chan[i];
> +		mutex_lock(&mhi_chan->lock);
> +		/* Send channel disconnect status to client drivers */
> +		if (mhi_chan->xfer_cb) {
> +			result.transaction_status = -ENOTCONN;
> +			result.bytes_xferd = 0;
> +			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
> +		}
> +
> +		/* Set channel state to DISABLED */

Omit the above comment.

> +		mhi_chan->state = MHI_CH_STATE_DISABLED;
> +		mutex_unlock(&mhi_chan->lock);
> +	}
> +
> +	flush_workqueue(mhi_cntrl->ring_wq);
> +	flush_workqueue(mhi_cntrl->state_wq);
> +
> +	/* Destroy devices associated with all channels */
> +	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
> +
> +	/* Stop and reset the transfer rings */
> +	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = ...

		if (!mhi_chan->ring.started)
			continue;

> +		ch_ring = &mhi_cntrl->mhi_chan[i].ring;
> +		if (!ch_ring->started)
> +			continue;
> +
> +		mhi_chan = &mhi_cntrl->mhi_chan[i];
> +		mutex_lock(&mhi_chan->lock);
> +		mhi_ep_ring_reset(mhi_cntrl, ch_ring);
> +		mutex_unlock(&mhi_chan->lock);
> +	}
> +
> +	/* Stop and reset the event rings */
> +	for (i = 0; i < mhi_cntrl->event_rings; i++) {
> +		ev_ring = &mhi_cntrl->mhi_event[i].ring;
> +		if (!ev_ring->started)
> +			continue;
> +
> +		mutex_lock(&mhi_cntrl->event_lock);
> +		mhi_ep_ring_reset(mhi_cntrl, ev_ring);
> +		mutex_unlock(&mhi_cntrl->event_lock);
> +	}
> +
> +	/* Stop and reset the command ring */
> +	mhi_ep_ring_reset(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
> +
> +	mhi_ep_free_host_cfg(mhi_cntrl);
> +	mhi_ep_mmio_mask_interrupts(mhi_cntrl);
> +
> +	mhi_cntrl->is_enabled = false;
> +}
> +
>   int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
>   {
>   	struct device *dev = &mhi_cntrl->mhi_dev->dev;
> @@ -548,6 +615,16 @@ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
>   }
>   EXPORT_SYMBOL_GPL(mhi_ep_power_up);
>   
> +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
> +{
> +	if (mhi_cntrl->is_enabled)
> +		mhi_ep_abort_transfer(mhi_cntrl);
> +
> +	kfree(mhi_cntrl->mhi_event);
> +	disable_irq(mhi_cntrl->irq);
> +}
> +EXPORT_SYMBOL_GPL(mhi_ep_power_down);
> +
>   static void mhi_ep_release_device(struct device *dev)
>   {
>   	struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
> @@ -828,6 +905,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
>   }
>   EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
>   
> +/*
> + * It is expected that the controller drivers will power down the MHI EP stack
> + * using "mhi_ep_power_down()" before calling this function to unregister themselves.
> + */
>   void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
>   {
>   	struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
> diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
> index 53895f1c68e1..4f86e7986c93 100644
> --- a/include/linux/mhi_ep.h
> +++ b/include/linux/mhi_ep.h
> @@ -260,4 +260,10 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);
>    */
>   int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
>   
> +/**
> + * mhi_ep_power_down - Power down the MHI endpoint stack
> + * @mhi_cntrl: MHI controller
> + */
> +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
> +
>   #endif
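
For reference, a sketch of what the two channel loops in mhi_ep_abort_transfer()
could look like with the suggestions above folded in (look up mhi_chan once,
derive the ring from it, and drop the redundant "Set channel state" comment).
Untested illustration only, not a replacement patch:

	/* Stop all the channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		mutex_lock(&mhi_chan->lock);
		/* Send channel disconnect status to client drivers */
		if (mhi_chan->xfer_cb) {
			result.transaction_status = -ENOTCONN;
			result.bytes_xferd = 0;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}

		mhi_chan->state = MHI_CH_STATE_DISABLED;
		mutex_unlock(&mhi_chan->lock);
	}

	/* Stop and reset the transfer rings */
	for (i = 0; i < mhi_cntrl->max_chan; i++) {
		mhi_chan = &mhi_cntrl->mhi_chan[i];
		if (!mhi_chan->ring.started)
			continue;

		mutex_lock(&mhi_chan->lock);
		mhi_ep_ring_reset(mhi_cntrl, &mhi_chan->ring);
		mutex_unlock(&mhi_chan->lock);
	}

With this shape the local ch_ring variable is only needed where it helps
readability; &mhi_chan->ring can be passed directly to mhi_ep_ring_reset().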