diff mbox series

[v2,1/9] firmware: arm_scmi: Add a virtio channel refcount

Message ID 20220201171601.53316-2-cristian.marussi@arm.com (mailing list archive)
State New, archived
Headers show
Series Add SCMI Virtio & Clock atomic support | expand

Commit Message

Cristian Marussi Feb. 1, 2022, 5:15 p.m. UTC
Currently SCMI VirtIO channels are marked with a ready flag and related
lock to track channel lifetime and support proper synchronization at
shutdown when virtqueues have to be stopped.

This leads to some extended spinlocked sections with IRQs off on the RX
path to keep hold of the ready flag and does not scale well especially when
SCMI VirtIO polling mode will be introduced.

Add an SCMI VirtIO channel dedicated refcount to track active users on both
the TX and the RX path and properly enforce synchronization and cleanup at
shutdown, inhibiting further usage of the channel once freed.

Cc: Igor Skalkin <igor.skalkin@opensynergy.com>
Cc: Peter Hilber <peter.hilber@opensynergy.com>
Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
---
 drivers/firmware/arm_scmi/virtio.c | 148 +++++++++++++++++++----------
 1 file changed, 100 insertions(+), 48 deletions(-)

Comments

Michael S. Tsirkin Feb. 1, 2022, 10:05 p.m. UTC | #1
On Tue, Feb 01, 2022 at 05:15:53PM +0000, Cristian Marussi wrote:
> Currently SCMI VirtIO channels are marked with a ready flag and related
> lock to track channel lifetime and support proper synchronization at
> shutdown when virtqueues have to be stopped.
> 
> This leads to some extended spinlocked sections with IRQs off on the RX
> path to keep hold of the ready flag and does not scale well especially when
> SCMI VirtIO polling mode will be introduced.
> 
> Add an SCMI VirtIO channel dedicated refcount to track active users on both
> the TX and the RX path and properly enforce synchronization and cleanup at
> shutdown, inhibiting further usage of the channel once freed.
> 
> Cc: Igor Skalkin <igor.skalkin@opensynergy.com>
> Cc: Peter Hilber <peter.hilber@opensynergy.com>
> Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
> ---
>  drivers/firmware/arm_scmi/virtio.c | 148 +++++++++++++++++++----------
>  1 file changed, 100 insertions(+), 48 deletions(-)
> 
> diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
> index fd0f6f91fc0b..536e46eab462 100644
> --- a/drivers/firmware/arm_scmi/virtio.c
> +++ b/drivers/firmware/arm_scmi/virtio.c
> @@ -17,7 +17,9 @@
>   * virtqueue. Access to each virtqueue is protected by spinlocks.
>   */
>  
> +#include <linux/completion.h>
>  #include <linux/errno.h>
> +#include <linux/refcount.h>
>  #include <linux/slab.h>
>  #include <linux/virtio.h>
>  #include <linux/virtio_config.h>
> @@ -27,6 +29,7 @@
>  
>  #include "common.h"
>  
> +#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
>  #define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
>  #define VIRTIO_SCMI_MAX_PDU_SIZE \
>  	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
> @@ -39,23 +42,21 @@
>   * @cinfo: SCMI Tx or Rx channel
>   * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
>   * @is_rx: Whether channel is an Rx channel
> - * @ready: Whether transport user is ready to hear about channel
>   * @max_msg: Maximum number of pending messages for this channel.
> - * @lock: Protects access to all members except ready.
> - * @ready_lock: Protects access to ready. If required, it must be taken before
> - *              lock.
> + * @lock: Protects access to all members except users.
> + * @shutdown_done: A reference to a completion used when freeing this channel.
> + * @users: A reference count to currently active users of this channel.
>   */
>  struct scmi_vio_channel {
>  	struct virtqueue *vqueue;
>  	struct scmi_chan_info *cinfo;
>  	struct list_head free_list;
>  	bool is_rx;
> -	bool ready;
>  	unsigned int max_msg;
> -	/* lock to protect access to all members except ready. */
> +	/* lock to protect access to all members except users. */
>  	spinlock_t lock;
> -	/* lock to rotects access to ready flag. */
> -	spinlock_t ready_lock;
> +	struct completion *shutdown_done;
> +	refcount_t users;
>  };
>  
>  /**
> @@ -76,6 +77,71 @@ struct scmi_vio_msg {
>  /* Only one SCMI VirtIO device can possibly exist */
>  static struct virtio_device *scmi_vdev;
>  
> +static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
> +				   struct scmi_chan_info *cinfo)
> +{
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&vioch->lock, flags);
> +	cinfo->transport_info = vioch;
> +	/* Indirectly setting channel not available any more */
> +	vioch->cinfo = cinfo;
> +	spin_unlock_irqrestore(&vioch->lock, flags);
> +
> +	refcount_set(&vioch->users, 1);
> +}
> +
> +static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
> +{
> +	return refcount_inc_not_zero(&vioch->users);
> +}
> +
> +static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
> +{
> +	if (refcount_dec_and_test(&vioch->users)) {
> +		unsigned long flags;
> +
> +		spin_lock_irqsave(&vioch->lock, flags);
> +		if (vioch->shutdown_done) {
> +			vioch->cinfo = NULL;
> +			complete(vioch->shutdown_done);
> +		}
> +		spin_unlock_irqrestore(&vioch->lock, flags);
> +	}
> +}
> +
> +static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
> +{
> +	int timeout;
> +	char *vq_name;
> +	unsigned long flags;
> +	struct device *dev;
> +	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
> +
> +	/*
> +	 * Prepare to wait for the last release if not already released
> +	 * or in progress.
> +	 */
> +	spin_lock_irqsave(&vioch->lock, flags);
> +	if (!vioch->cinfo || vioch->shutdown_done) {
> +		spin_unlock_irqrestore(&vioch->lock, flags);
> +		return;
> +	}
> +	vioch->shutdown_done = &vioch_shutdown_done;
> +	vq_name = vioch->is_rx ? "RX" : "TX";
> +	/* vioch->cinfo could be NULLified after the release */
> +	dev = vioch->cinfo->dev;
> +	spin_unlock_irqrestore(&vioch->lock, flags);
> +
> +	scmi_vio_channel_release(vioch);
> +
> +	timeout = msecs_to_jiffies(VIRTIO_MAX_RX_TIMEOUT_MS + 10);
> +	/* Let any possibly concurrent RX path release the channel */
> +	if (!wait_for_completion_timeout(vioch->shutdown_done, timeout))
> +		dev_warn(dev,
> +			 "Timeout shutting down %s VQ.\n", vq_name);
> +}
> +

Hmm. So if it times out then what? It's ok to corrupt memory then?
Why? I suspect if you want to recover from this you need to mark device
as broken, synchronize with all callbacks (we don't have an API
for that but we really should). Only then you will know it's
not doing anything.


>  static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
>  {
>  	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
> @@ -119,7 +185,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
>  
>  static void scmi_vio_complete_cb(struct virtqueue *vqueue)
>  {
> -	unsigned long ready_flags;
> +	unsigned long flags;
>  	unsigned int length;
>  	struct scmi_vio_channel *vioch;
>  	struct scmi_vio_msg *msg;
> @@ -130,27 +196,27 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
>  	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
>  
>  	for (;;) {
> -		spin_lock_irqsave(&vioch->ready_lock, ready_flags);
> -
> -		if (!vioch->ready) {
> +		if (!scmi_vio_channel_acquire(vioch)) {
>  			if (!cb_enabled)
>  				(void)virtqueue_enable_cb(vqueue);
> -			goto unlock_ready_out;
> +			return;
>  		}
>  
> -		/* IRQs already disabled here no need to irqsave */
> -		spin_lock(&vioch->lock);
> +		spin_lock_irqsave(&vioch->lock, flags);
>  		if (cb_enabled) {
>  			virtqueue_disable_cb(vqueue);
>  			cb_enabled = false;
>  		}
>  		msg = virtqueue_get_buf(vqueue, &length);
>  		if (!msg) {
> -			if (virtqueue_enable_cb(vqueue))
> -				goto unlock_out;
> +			if (virtqueue_enable_cb(vqueue)) {
> +				spin_unlock_irqrestore(&vioch->lock, flags);
> +				scmi_vio_channel_release(vioch);
> +				return;
> +			}
>  			cb_enabled = true;
>  		}
> -		spin_unlock(&vioch->lock);
> +		spin_unlock_irqrestore(&vioch->lock, flags);
>  
>  		if (msg) {
>  			msg->rx_len = length;
> @@ -161,19 +227,14 @@ static void scmi_vio_complete_cb(struct virtqueue *vqueue)
>  		}
>  
>  		/*
> -		 * Release ready_lock and re-enable IRQs between loop iterations
> -		 * to allow virtio_chan_free() to possibly kick in and set the
> -		 * flag vioch->ready to false even in between processing of
> -		 * messages, so as to force outstanding messages to be ignored
> -		 * when system is shutting down.
> +		 * Release vio channel between loop iterations to allow
> +		 * virtio_chan_free() to eventually fully release it when
> +		 * shutting down; in such a case, any outstanding message will
> +		 * be ignored since this loop will bail out at the next
> +		 * iteration.
>  		 */
> -		spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
> +		scmi_vio_channel_release(vioch);
>  	}
> -
> -unlock_out:
> -	spin_unlock(&vioch->lock);
> -unlock_ready_out:
> -	spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
>  }
>  
>  static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };
> @@ -273,35 +334,20 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
>  		}
>  	}
>  
> -	spin_lock_irqsave(&vioch->lock, flags);
> -	cinfo->transport_info = vioch;
> -	/* Indirectly setting channel not available any more */
> -	vioch->cinfo = cinfo;
> -	spin_unlock_irqrestore(&vioch->lock, flags);
> -
> -	spin_lock_irqsave(&vioch->ready_lock, flags);
> -	vioch->ready = true;
> -	spin_unlock_irqrestore(&vioch->ready_lock, flags);
> +	scmi_vio_channel_ready(vioch, cinfo);
>  
>  	return 0;
>  }
>  
>  static int virtio_chan_free(int id, void *p, void *data)
>  {
> -	unsigned long flags;
>  	struct scmi_chan_info *cinfo = p;
>  	struct scmi_vio_channel *vioch = cinfo->transport_info;
>  
> -	spin_lock_irqsave(&vioch->ready_lock, flags);
> -	vioch->ready = false;
> -	spin_unlock_irqrestore(&vioch->ready_lock, flags);
> +	scmi_vio_channel_cleanup_sync(vioch);
>  
>  	scmi_free_channel(cinfo, data, id);
>  
> -	spin_lock_irqsave(&vioch->lock, flags);
> -	vioch->cinfo = NULL;
> -	spin_unlock_irqrestore(&vioch->lock, flags);
> -
>  	return 0;
>  }
>  
> @@ -316,10 +362,14 @@ static int virtio_send_message(struct scmi_chan_info *cinfo,
>  	int rc;
>  	struct scmi_vio_msg *msg;
>  
> +	if (!scmi_vio_channel_acquire(vioch))
> +		return -EINVAL;
> +
>  	spin_lock_irqsave(&vioch->lock, flags);
>  
>  	if (list_empty(&vioch->free_list)) {
>  		spin_unlock_irqrestore(&vioch->lock, flags);
> +		scmi_vio_channel_release(vioch);
>  		return -EBUSY;
>  	}
>  
> @@ -342,6 +392,8 @@ static int virtio_send_message(struct scmi_chan_info *cinfo,
>  
>  	spin_unlock_irqrestore(&vioch->lock, flags);
>  
> +	scmi_vio_channel_release(vioch);
> +
>  	return rc;
>  }
>  
> @@ -416,7 +468,6 @@ static int scmi_vio_probe(struct virtio_device *vdev)
>  		unsigned int sz;
>  
>  		spin_lock_init(&channels[i].lock);
> -		spin_lock_init(&channels[i].ready_lock);
>  		INIT_LIST_HEAD(&channels[i].free_list);
>  		channels[i].vqueue = vqs[i];
>  
> @@ -503,7 +554,8 @@ const struct scmi_desc scmi_virtio_desc = {
>  	.transport_init = virtio_scmi_init,
>  	.transport_exit = virtio_scmi_exit,
>  	.ops = &scmi_virtio_ops,
> -	.max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */
> +	/* for non-realtime virtio devices */
> +	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
>  	.max_msg = 0, /* overridden by virtio_get_max_msg() */
>  	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
>  };
> -- 
> 2.17.1
> 
> 
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
>
Cristian Marussi Feb. 3, 2022, 10:08 a.m. UTC | #2
On Tue, Feb 01, 2022 at 05:05:27PM -0500, Michael S. Tsirkin wrote:
> On Tue, Feb 01, 2022 at 05:15:53PM +0000, Cristian Marussi wrote:
> > Currently SCMI VirtIO channels are marked with a ready flag and related
> > lock to track channel lifetime and support proper synchronization at
> > shutdown when virtqueues have to be stopped.
> > 
> > This leads to some extended spinlocked sections with IRQs off on the RX
> > path to keep hold of the ready flag and does not scale well especially when
> > SCMI VirtIO polling mode will be introduced.
> > 
> > Add an SCMI VirtIO channel dedicated refcount to track active users on both
> > the TX and the RX path and properly enforce synchronization and cleanup at
> > shutdown, inhibiting further usage of the channel once freed.
> > 
> > Cc: Igor Skalkin <igor.skalkin@opensynergy.com>
> > Cc: Peter Hilber <peter.hilber@opensynergy.com>
> > Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
> > ---
> >  drivers/firmware/arm_scmi/virtio.c | 148 +++++++++++++++++++----------
> >  1 file changed, 100 insertions(+), 48 deletions(-)
> > 
> > diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
> > index fd0f6f91fc0b..536e46eab462 100644
> > --- a/drivers/firmware/arm_scmi/virtio.c
> > +++ b/drivers/firmware/arm_scmi/virtio.c
> > @@ -17,7 +17,9 @@
> >   * virtqueue. Access to each virtqueue is protected by spinlocks.
> >   */
> >  

Hi Michael,

thanks for your review first of all.

> > +#include <linux/completion.h>
> >  #include <linux/errno.h>
> > +#include <linux/refcount.h>
> >  #include <linux/slab.h>
> >  #include <linux/virtio.h>
> >  #include <linux/virtio_config.h>
> > @@ -27,6 +29,7 @@
> >  
> >  #include "common.h"
> >  
> > +#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
> >  #define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
> >  #define VIRTIO_SCMI_MAX_PDU_SIZE \
> >  	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
> > @@ -39,23 +42,21 @@
> >   * @cinfo: SCMI Tx or Rx channel
> >   * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
> >   * @is_rx: Whether channel is an Rx channel
> > - * @ready: Whether transport user is ready to hear about channel
> >   * @max_msg: Maximum number of pending messages for this channel.
> > - * @lock: Protects access to all members except ready.
> > - * @ready_lock: Protects access to ready. If required, it must be taken before
> > - *              lock.
> > + * @lock: Protects access to all members except users.
> > + * @shutdown_done: A reference to a completion used when freeing this channel.
> > + * @users: A reference count to currently active users of this channel.
> >   */
> >  struct scmi_vio_channel {
> >  	struct virtqueue *vqueue;
> >  	struct scmi_chan_info *cinfo;
> >  	struct list_head free_list;
> >  	bool is_rx;
> > -	bool ready;
> >  	unsigned int max_msg;
> > -	/* lock to protect access to all members except ready. */
> > +	/* lock to protect access to all members except users. */
> >  	spinlock_t lock;
> > -	/* lock to rotects access to ready flag. */
> > -	spinlock_t ready_lock;
> > +	struct completion *shutdown_done;
> > +	refcount_t users;
> >  };
> >  
> >  /**
> > @@ -76,6 +77,71 @@ struct scmi_vio_msg {
> >  /* Only one SCMI VirtIO device can possibly exist */
> >  static struct virtio_device *scmi_vdev;
> >  
> > +static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
> > +				   struct scmi_chan_info *cinfo)
> > +{
> > +	unsigned long flags;
> > +
> > +	spin_lock_irqsave(&vioch->lock, flags);
> > +	cinfo->transport_info = vioch;
> > +	/* Indirectly setting channel not available any more */
> > +	vioch->cinfo = cinfo;
> > +	spin_unlock_irqrestore(&vioch->lock, flags);
> > +
> > +	refcount_set(&vioch->users, 1);
> > +}
> > +
> > +static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
> > +{
> > +	return refcount_inc_not_zero(&vioch->users);
> > +}
> > +
> > +static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
> > +{
> > +	if (refcount_dec_and_test(&vioch->users)) {
> > +		unsigned long flags;
> > +
> > +		spin_lock_irqsave(&vioch->lock, flags);
> > +		if (vioch->shutdown_done) {
> > +			vioch->cinfo = NULL;
> > +			complete(vioch->shutdown_done);
> > +		}
> > +		spin_unlock_irqrestore(&vioch->lock, flags);
> > +	}
> > +}
> > +
> > +static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
> > +{
> > +	int timeout;
> > +	char *vq_name;
> > +	unsigned long flags;
> > +	struct device *dev;
> > +	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
> > +
> > +	/*
> > +	 * Prepare to wait for the last release if not already released
> > +	 * or in progress.
> > +	 */
> > +	spin_lock_irqsave(&vioch->lock, flags);
> > +	if (!vioch->cinfo || vioch->shutdown_done) {
> > +		spin_unlock_irqrestore(&vioch->lock, flags);
> > +		return;
> > +	}
> > +	vioch->shutdown_done = &vioch_shutdown_done;
> > +	vq_name = vioch->is_rx ? "RX" : "TX";
> > +	/* vioch->cinfo could be NULLified after the release */
> > +	dev = vioch->cinfo->dev;
> > +	spin_unlock_irqrestore(&vioch->lock, flags);
> > +
> > +	scmi_vio_channel_release(vioch);
> > +
> > +	timeout = msecs_to_jiffies(VIRTIO_MAX_RX_TIMEOUT_MS + 10);
> > +	/* Let any possibly concurrent RX path release the channel */
> > +	if (!wait_for_completion_timeout(vioch->shutdown_done, timeout))
> > +		dev_warn(dev,
> > +			 "Timeout shutting down %s VQ.\n", vq_name);
> > +}
> > +
> 
> Hmm. So if it times out then what? It's ok to corrupt memory then?
> Why? I suspect if you want to recover from this you need to mark device
> as broken, synchronize with all callbacks (we don't have an API
> for that but we really should). Only then you will know it's
> not doing anything.
> 

In fact I was not sure how to address this situation and not sure even
if it was a real possibility, but indeed a constant flood of evilly
interleaved messages coming from the queues could keep the refcount
indefinitely > 0 (and the fact that I am shutting down is no excuse to
corrupt memory...my bad)

So what if I mark the device broken here BEFORE this possible final
release, so as to disrupt completely any communication on the virtqueues
at first (inhibiting also any callback invocations on IRQ) and then sync
on my channel refcount as it is now.
(and then just wait indefinitely without any timeout..)

something like (not tried still..):

....
	vq_name = vioch->is_rx ? "RX" : "TX";
	/* vioch->cinfo could be NULLified after the release */
	dev = vioch->cinfo->dev;
+	virtio_break_device(vioch->vqueue->vdev);
	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	/* Let any possibly concurrent RX path release the channel */
+	wait_for_completion(vioch->shutdown_done);


Thanks,
Cristian
diff mbox series

Patch

diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index fd0f6f91fc0b..536e46eab462 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -17,7 +17,9 @@ 
  * virtqueue. Access to each virtqueue is protected by spinlocks.
  */
 
+#include <linux/completion.h>
 #include <linux/errno.h>
+#include <linux/refcount.h>
 #include <linux/slab.h>
 #include <linux/virtio.h>
 #include <linux/virtio_config.h>
@@ -27,6 +29,7 @@ 
 
 #include "common.h"
 
+#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
 #define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
 #define VIRTIO_SCMI_MAX_PDU_SIZE \
 	(VIRTIO_SCMI_MAX_MSG_SIZE + SCMI_MSG_MAX_PROT_OVERHEAD)
@@ -39,23 +42,21 @@ 
  * @cinfo: SCMI Tx or Rx channel
  * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
  * @is_rx: Whether channel is an Rx channel
- * @ready: Whether transport user is ready to hear about channel
  * @max_msg: Maximum number of pending messages for this channel.
- * @lock: Protects access to all members except ready.
- * @ready_lock: Protects access to ready. If required, it must be taken before
- *              lock.
+ * @lock: Protects access to all members except users.
+ * @shutdown_done: A reference to a completion used when freeing this channel.
+ * @users: A reference count to currently active users of this channel.
  */
 struct scmi_vio_channel {
 	struct virtqueue *vqueue;
 	struct scmi_chan_info *cinfo;
 	struct list_head free_list;
 	bool is_rx;
-	bool ready;
 	unsigned int max_msg;
-	/* lock to protect access to all members except ready. */
+	/* lock to protect access to all members except users. */
 	spinlock_t lock;
-	/* lock to rotects access to ready flag. */
-	spinlock_t ready_lock;
+	struct completion *shutdown_done;
+	refcount_t users;
 };
 
 /**
@@ -76,6 +77,71 @@  struct scmi_vio_msg {
 /* Only one SCMI VirtIO device can possibly exist */
 static struct virtio_device *scmi_vdev;
 
+static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
+				   struct scmi_chan_info *cinfo)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vioch->lock, flags);
+	cinfo->transport_info = vioch;
+	/* Indirectly setting channel not available any more */
+	vioch->cinfo = cinfo;
+	spin_unlock_irqrestore(&vioch->lock, flags);
+
+	refcount_set(&vioch->users, 1);
+}
+
+static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
+{
+	return refcount_inc_not_zero(&vioch->users);
+}
+
+static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
+{
+	if (refcount_dec_and_test(&vioch->users)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&vioch->lock, flags);
+		if (vioch->shutdown_done) {
+			vioch->cinfo = NULL;
+			complete(vioch->shutdown_done);
+		}
+		spin_unlock_irqrestore(&vioch->lock, flags);
+	}
+}
+
+static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
+{
+	int timeout;
+	char *vq_name;
+	unsigned long flags;
+	struct device *dev;
+	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);
+
+	/*
+	 * Prepare to wait for the last release if not already released
+	 * or in progress.
+	 */
+	spin_lock_irqsave(&vioch->lock, flags);
+	if (!vioch->cinfo || vioch->shutdown_done) {
+		spin_unlock_irqrestore(&vioch->lock, flags);
+		return;
+	}
+	vioch->shutdown_done = &vioch_shutdown_done;
+	vq_name = vioch->is_rx ? "RX" : "TX";
+	/* vioch->cinfo could be NULLified after the release */
+	dev = vioch->cinfo->dev;
+	spin_unlock_irqrestore(&vioch->lock, flags);
+
+	scmi_vio_channel_release(vioch);
+
+	timeout = msecs_to_jiffies(VIRTIO_MAX_RX_TIMEOUT_MS + 10);
+	/* Let any possibly concurrent RX path release the channel */
+	if (!wait_for_completion_timeout(vioch->shutdown_done, timeout))
+		dev_warn(dev,
+			 "Timeout shutting down %s VQ.\n", vq_name);
+}
+
 static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
 {
 	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
@@ -119,7 +185,7 @@  static void scmi_finalize_message(struct scmi_vio_channel *vioch,
 
 static void scmi_vio_complete_cb(struct virtqueue *vqueue)
 {
-	unsigned long ready_flags;
+	unsigned long flags;
 	unsigned int length;
 	struct scmi_vio_channel *vioch;
 	struct scmi_vio_msg *msg;
@@ -130,27 +196,27 @@  static void scmi_vio_complete_cb(struct virtqueue *vqueue)
 	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];
 
 	for (;;) {
-		spin_lock_irqsave(&vioch->ready_lock, ready_flags);
-
-		if (!vioch->ready) {
+		if (!scmi_vio_channel_acquire(vioch)) {
 			if (!cb_enabled)
 				(void)virtqueue_enable_cb(vqueue);
-			goto unlock_ready_out;
+			return;
 		}
 
-		/* IRQs already disabled here no need to irqsave */
-		spin_lock(&vioch->lock);
+		spin_lock_irqsave(&vioch->lock, flags);
 		if (cb_enabled) {
 			virtqueue_disable_cb(vqueue);
 			cb_enabled = false;
 		}
 		msg = virtqueue_get_buf(vqueue, &length);
 		if (!msg) {
-			if (virtqueue_enable_cb(vqueue))
-				goto unlock_out;
+			if (virtqueue_enable_cb(vqueue)) {
+				spin_unlock_irqrestore(&vioch->lock, flags);
+				scmi_vio_channel_release(vioch);
+				return;
+			}
 			cb_enabled = true;
 		}
-		spin_unlock(&vioch->lock);
+		spin_unlock_irqrestore(&vioch->lock, flags);
 
 		if (msg) {
 			msg->rx_len = length;
@@ -161,19 +227,14 @@  static void scmi_vio_complete_cb(struct virtqueue *vqueue)
 		}
 
 		/*
-		 * Release ready_lock and re-enable IRQs between loop iterations
-		 * to allow virtio_chan_free() to possibly kick in and set the
-		 * flag vioch->ready to false even in between processing of
-		 * messages, so as to force outstanding messages to be ignored
-		 * when system is shutting down.
+		 * Release vio channel between loop iterations to allow
+		 * virtio_chan_free() to eventually fully release it when
+		 * shutting down; in such a case, any outstanding message will
+		 * be ignored since this loop will bail out at the next
+		 * iteration.
 		 */
-		spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
+		scmi_vio_channel_release(vioch);
 	}
-
-unlock_out:
-	spin_unlock(&vioch->lock);
-unlock_ready_out:
-	spin_unlock_irqrestore(&vioch->ready_lock, ready_flags);
 }
 
 static const char *const scmi_vio_vqueue_names[] = { "tx", "rx" };
@@ -273,35 +334,20 @@  static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 		}
 	}
 
-	spin_lock_irqsave(&vioch->lock, flags);
-	cinfo->transport_info = vioch;
-	/* Indirectly setting channel not available any more */
-	vioch->cinfo = cinfo;
-	spin_unlock_irqrestore(&vioch->lock, flags);
-
-	spin_lock_irqsave(&vioch->ready_lock, flags);
-	vioch->ready = true;
-	spin_unlock_irqrestore(&vioch->ready_lock, flags);
+	scmi_vio_channel_ready(vioch, cinfo);
 
 	return 0;
 }
 
 static int virtio_chan_free(int id, void *p, void *data)
 {
-	unsigned long flags;
 	struct scmi_chan_info *cinfo = p;
 	struct scmi_vio_channel *vioch = cinfo->transport_info;
 
-	spin_lock_irqsave(&vioch->ready_lock, flags);
-	vioch->ready = false;
-	spin_unlock_irqrestore(&vioch->ready_lock, flags);
+	scmi_vio_channel_cleanup_sync(vioch);
 
 	scmi_free_channel(cinfo, data, id);
 
-	spin_lock_irqsave(&vioch->lock, flags);
-	vioch->cinfo = NULL;
-	spin_unlock_irqrestore(&vioch->lock, flags);
-
 	return 0;
 }
 
@@ -316,10 +362,14 @@  static int virtio_send_message(struct scmi_chan_info *cinfo,
 	int rc;
 	struct scmi_vio_msg *msg;
 
+	if (!scmi_vio_channel_acquire(vioch))
+		return -EINVAL;
+
 	spin_lock_irqsave(&vioch->lock, flags);
 
 	if (list_empty(&vioch->free_list)) {
 		spin_unlock_irqrestore(&vioch->lock, flags);
+		scmi_vio_channel_release(vioch);
 		return -EBUSY;
 	}
 
@@ -342,6 +392,8 @@  static int virtio_send_message(struct scmi_chan_info *cinfo,
 
 	spin_unlock_irqrestore(&vioch->lock, flags);
 
+	scmi_vio_channel_release(vioch);
+
 	return rc;
 }
 
@@ -416,7 +468,6 @@  static int scmi_vio_probe(struct virtio_device *vdev)
 		unsigned int sz;
 
 		spin_lock_init(&channels[i].lock);
-		spin_lock_init(&channels[i].ready_lock);
 		INIT_LIST_HEAD(&channels[i].free_list);
 		channels[i].vqueue = vqs[i];
 
@@ -503,7 +554,8 @@  const struct scmi_desc scmi_virtio_desc = {
 	.transport_init = virtio_scmi_init,
 	.transport_exit = virtio_scmi_exit,
 	.ops = &scmi_virtio_ops,
-	.max_rx_timeout_ms = 60000, /* for non-realtime virtio devices */
+	/* for non-realtime virtio devices */
+	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
 	.max_msg = 0, /* overridden by virtio_get_max_msg() */
 	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
 };