
[087/104] virtiofsd: prevent fv_queue_thread() vs virtio_loop() races

Message ID: 20191212163904.159893-88-dgilbert@redhat.com
State: New, archived
Series: virtiofs daemon [all]

Commit Message

Dr. David Alan Gilbert Dec. 12, 2019, 4:38 p.m. UTC
From: Stefan Hajnoczi <stefanha@redhat.com>

We call into libvhost-user from the virtqueue handler thread and the
vhost-user message processing thread without a lock.  There is nothing
protecting the virtqueue handler thread if the vhost-user message
processing thread changes the virtqueue or memory table while it is
running.

This patch introduces a read-write lock.  Virtqueue handler threads are
readers.  The vhost-user message processing thread is a writer.  This
will allow concurrency for multiqueue in the future while protecting
against fv_queue_thread() vs virtio_loop() races.

Note that the critical sections could be made smaller but it would be
more invasive and require libvhost-user changes.  Let's start simple and
improve performance later, if necessary.  Another option would be an
RCU-style approach with lighter-weight primitives.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
  Merged with log changes
  pull request 12
---
 tools/virtiofsd/fuse_virtio.c | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)
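
For reference, the locking pattern the commit message describes can be shown as a minimal standalone C sketch: queue handler threads take the lock as readers, the vhost-user message-processing thread as the writer. The names here (dev_state, queue_handler, message_handler) are hypothetical stand-ins for the patch's fv_VuDev and its threads, not code from the patch:

/*
 * Minimal sketch of the rwlock pattern (hypothetical names).
 * Compile with -lpthread.
 */
#include <assert.h>
#include <pthread.h>

struct dev_state {
    pthread_rwlock_t vu_dispatch_rwlock;
    /* virtqueues, memory table, ... */
};

/*
 * Queue handler thread: a reader.  Multiple readers may hold the lock
 * concurrently, which is what leaves room for multiqueue later.
 */
static void queue_handler(struct dev_state *d)
{
    int ret = pthread_rwlock_rdlock(&d->vu_dispatch_rwlock);
    assert(ret == 0);
    /* ... pop and process virtqueue elements; the vhost-user state
     * cannot change underneath us while the read lock is held ... */
    pthread_rwlock_unlock(&d->vu_dispatch_rwlock);
}

/*
 * vhost-user message-processing thread: the writer.  Taking the write
 * lock excludes every queue handler for the whole dispatch.
 */
static void message_handler(struct dev_state *d)
{
    int ret = pthread_rwlock_wrlock(&d->vu_dispatch_rwlock);
    assert(ret == 0);
    /* ... may remap the memory table or reconfigure virtqueues ... */
    pthread_rwlock_unlock(&d->vu_dispatch_rwlock);
}

Because a read lock admits any number of concurrent holders, only a vhost-user message forces exclusive access; the data path pays just an uncontended rdlock/unlock per kick.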

Comments

Daniel P. Berrangé Jan. 7, 2020, 12:02 p.m. UTC | #1
On Thu, Dec 12, 2019 at 04:38:47PM +0000, Dr. David Alan Gilbert (git) wrote:
> From: Stefan Hajnoczi <stefanha@redhat.com>
> 
> We call into libvhost-user from the virtqueue handler thread and the
> vhost-user message processing thread without a lock.  There is nothing
> protecting the virtqueue handler thread if the vhost-user message
> processing thread changes the virtqueue or memory table while it is
> running.
> 
> This patch introduces a read-write lock.  Virtqueue handler threads are
> readers.  The vhost-user message processing thread is a writer.  This
> will allow concurrency for multiqueue in the future while protecting
> against fv_queue_thread() vs virtio_loop() races.
> 
> Note that the critical sections could be made smaller but it would be
> more invasive and require libvhost-user changes.  Let's start simple and
> improve performance later, if necessary.  Another option would be an
> RCU-style approach with lighter-weight primitives.
> 
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
>   Merged with log changes
>   pull request 12

Two extraneous lines

> ---
>  tools/virtiofsd/fuse_virtio.c | 34 +++++++++++++++++++++++++++++++++-
>  1 file changed, 33 insertions(+), 1 deletion(-)

Reviewed-by: Daniel P. Berrangé <berrange@redhat.com>


Regards,
Daniel

Patch

diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c
index a364f23d5d..2c1e524852 100644
--- a/tools/virtiofsd/fuse_virtio.c
+++ b/tools/virtiofsd/fuse_virtio.c
@@ -58,6 +58,18 @@  struct fv_VuDev {
     VuDev dev;
     struct fuse_session *se;
 
+    /*
+     * Either handle virtqueues or vhost-user protocol messages.  Don't do
+     * both at the same time since that could lead to race conditions if
+     * virtqueues or memory tables change while another thread is accessing
+     * them.
+     *
+     * The assumptions are:
+     * 1. fv_queue_thread() reads/writes to virtqueues and only reads VuDev.
+     * 2. virtio_loop() reads/writes virtqueues and VuDev.
+     */
+    pthread_rwlock_t vu_dispatch_rwlock;
+
     /*
      * The following pair of fields are only accessed in the main
      * virtio_loop
@@ -413,6 +425,8 @@  static void *fv_queue_thread(void *opaque)
              qi->qidx, qi->kick_fd);
     while (1) {
         struct pollfd pf[2];
+        int ret;
+
         pf[0].fd = qi->kick_fd;
         pf[0].events = POLLIN;
         pf[0].revents = 0;
@@ -459,6 +473,9 @@  static void *fv_queue_thread(void *opaque)
             fuse_log(FUSE_LOG_ERR, "Eventfd_read for queue: %m\n");
             break;
         }
+        /* Mutual exclusion with virtio_loop() */
+        ret = pthread_rwlock_rdlock(&qi->virtio_dev->vu_dispatch_rwlock);
+        assert(ret == 0); /* there is no possible error case */
        /* out is from guest, in is to guest */
         unsigned int in_bytes, out_bytes;
         vu_queue_get_avail_bytes(dev, q, &in_bytes, &out_bytes, ~0, ~0);
@@ -467,6 +484,7 @@  static void *fv_queue_thread(void *opaque)
                  "%s: Queue %d gave evalue: %zx available: in: %u out: %u\n",
                  __func__, qi->qidx, (size_t)evalue, in_bytes, out_bytes);
 
+
         while (1) {
             bool allocated_bufv = false;
             struct fuse_bufvec bufv;
@@ -595,6 +613,8 @@  static void *fv_queue_thread(void *opaque)
             free(elem);
             elem = NULL;
         }
+
+        pthread_rwlock_unlock(&qi->virtio_dev->vu_dispatch_rwlock);
     }
 out:
     pthread_mutex_destroy(&ch.lock);
@@ -701,6 +721,8 @@  int virtio_loop(struct fuse_session *se)
 
     while (!fuse_session_exited(se)) {
         struct pollfd pf[1];
+        bool ok;
+        int ret;
         pf[0].fd = se->vu_socketfd;
         pf[0].events = POLLIN;
         pf[0].revents = 0;
@@ -725,7 +747,15 @@  int virtio_loop(struct fuse_session *se)
         }
         assert(pf[0].revents & POLLIN);
         fuse_log(FUSE_LOG_DEBUG, "%s: Got VU event\n", __func__);
-        if (!vu_dispatch(&se->virtio_dev->dev)) {
+        /* Mutual exclusion with fv_queue_thread() */
+        ret = pthread_rwlock_wrlock(&se->virtio_dev->vu_dispatch_rwlock);
+        assert(ret == 0); /* there is no possible error case */
+
+        ok = vu_dispatch(&se->virtio_dev->dev);
+
+        pthread_rwlock_unlock(&se->virtio_dev->vu_dispatch_rwlock);
+
+        if (!ok) {
             fuse_log(FUSE_LOG_ERR, "%s: vu_dispatch failed\n", __func__);
             break;
         }
@@ -871,6 +901,7 @@  int virtio_session_mount(struct fuse_session *se)
 
     se->vu_socketfd = data_sock;
     se->virtio_dev->se = se;
+    pthread_rwlock_init(&se->virtio_dev->vu_dispatch_rwlock, NULL);
     vu_init(&se->virtio_dev->dev, 2, se->vu_socketfd, fv_panic, fv_set_watch,
             fv_remove_watch, &fv_iface);
 
@@ -887,6 +918,7 @@  void virtio_session_close(struct fuse_session *se)
 
     close(se->vu_socketfd);
     free(se->virtio_dev->qi);
+    pthread_rwlock_destroy(&se->virtio_dev->vu_dispatch_rwlock);
     free(se->virtio_dev);
     se->virtio_dev = NULL;
 }
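
As a footnote to the commit message's mention of an RCU-style alternative, a rough sketch with userspace RCU (liburcu) might look like the following. This is purely illustrative: the patch uses the rwlock above, and the mem_tables type and update_tables() helper are invented for this example.

#include <stdlib.h>
#include <urcu.h>   /* liburcu; link with -lurcu */

struct mem_tables {
    /* guest memory regions, ... */
    int nregions;
};

static struct mem_tables *current_tables;   /* RCU-protected pointer */

/*
 * Reader side (queue handler thread; each thread must have called
 * rcu_register_thread() once at startup).
 */
static void reader(void)
{
    rcu_read_lock();
    struct mem_tables *t = rcu_dereference(current_tables);
    /* ... translate guest addresses through *t ... */
    rcu_read_unlock();
}

/*
 * Writer side (message-processing thread): publish the new table, wait
 * out a grace period so no reader still sees the old one, then free it.
 */
static void update_tables(struct mem_tables *fresh)
{
    struct mem_tables *old = rcu_xchg_pointer(&current_tables, fresh);
    synchronize_rcu();
    free(old);
}

The trade-off is the one the commit message hints at: readers pay almost nothing in the fast path, while the writer absorbs the cost of the grace period before reclaiming the old table.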