
[v3,3/4] vhost-user-rng: backend: Add RNG vhost-user daemon implementation

Message ID 20210710005929.1702431-4-mathieu.poirier@linaro.org
State New, archived
Series virtio: Add vhost-user based RNG

Commit Message

Mathieu Poirier July 10, 2021, 12:59 a.m. UTC
This patch provides the vhost-user backend implementation to work
in tandem with the vhost-user-rng implementation of the QEMU VMM.

It uses the vhost-user API so that other VMMs can re-use the interface
without having to write the driver again.

Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
---
 tools/meson.build                        |   8 +
 tools/vhost-user-rng/50-qemu-rng.json.in |   5 +
 tools/vhost-user-rng/main.c              | 403 +++++++++++++++++++++++
 tools/vhost-user-rng/meson.build         |  10 +
 4 files changed, 426 insertions(+)
 create mode 100644 tools/vhost-user-rng/50-qemu-rng.json.in
 create mode 100644 tools/vhost-user-rng/main.c
 create mode 100644 tools/vhost-user-rng/meson.build

Comments

Alex Bennée July 21, 2021, 11:30 a.m. UTC | #1
Mathieu Poirier <mathieu.poirier@linaro.org> writes:

> This patch provides the vhost-user backend implementation to work
> in tandem with the vhost-user-rng implementation of the QEMU VMM.
>
> It uses the vhost-user API so that other VMMs can re-use the interface
> without having to write the driver again.
>
> Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
> ---
>  tools/meson.build                        |   8 +
>  tools/vhost-user-rng/50-qemu-rng.json.in |   5 +
>  tools/vhost-user-rng/main.c              | 403 +++++++++++++++++++++++
>  tools/vhost-user-rng/meson.build         |  10 +
>  4 files changed, 426 insertions(+)
>  create mode 100644 tools/vhost-user-rng/50-qemu-rng.json.in
>  create mode 100644 tools/vhost-user-rng/main.c
>  create mode 100644 tools/vhost-user-rng/meson.build
>
> diff --git a/tools/meson.build b/tools/meson.build
> index 3e5a0abfa29f..66b0a11fbb45 100644
> --- a/tools/meson.build
> +++ b/tools/meson.build
> @@ -24,3 +24,11 @@ endif
>  if have_virtiofsd
>    subdir('virtiofsd')
>  endif
> +
> +have_virtiorng = (have_system and
> +    have_tools and
> +    'CONFIG_LINUX' in config_host)
> +
> +if have_virtiorng
> +  subdir('vhost-user-rng')
> +endif
> diff --git a/tools/vhost-user-rng/50-qemu-rng.json.in b/tools/vhost-user-rng/50-qemu-rng.json.in
> new file mode 100644
> index 000000000000..9186c3c6fe1d
> --- /dev/null
> +++ b/tools/vhost-user-rng/50-qemu-rng.json.in
> @@ -0,0 +1,5 @@
> +{
> +  "description": "QEMU vhost-user-rng",
> +  "type": "bridge",
> +  "binary": "@libexecdir@/vhost-user-rng"
> +}
> diff --git a/tools/vhost-user-rng/main.c b/tools/vhost-user-rng/main.c
> new file mode 100644
> index 000000000000..c3b8f6922757
> --- /dev/null
> +++ b/tools/vhost-user-rng/main.c
> @@ -0,0 +1,403 @@
> +/*
> + * VIRTIO RNG Emulation via vhost-user
> + *
> + * Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
> + *
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + */
> +
> +#define G_LOG_DOMAIN "vhost-user-rng"
> +#define G_LOG_USE_STRUCTURED 1
> +
> +#include <glib.h>
> +#include <gio/gio.h>
> +#include <gio/gunixsocketaddress.h>
> +#include <glib-unix.h>
> +#include <glib/gstdio.h>
> +#include <pthread.h>
> +#include <signal.h>
> +#include <stdio.h>
> +#include <stdbool.h>
> +#include <string.h>
> +#include <inttypes.h>
> +#include <fcntl.h>
> +#include <sys/ioctl.h>
> +#include <sys/types.h>
> +#include <sys/stat.h>
> +#include <sys/mman.h>
> +#include <time.h>
> +#include <unistd.h>
> +#include <endian.h>
> +#include <assert.h>
> +
> +#include "qemu/cutils.h"
> +#include "subprojects/libvhost-user/libvhost-user-glib.h"
> +#include "subprojects/libvhost-user/libvhost-user.h"
> +
> +#ifndef container_of
> +#define container_of(ptr, type, member) ({                      \
> +        const typeof(((type *) 0)->member) * __mptr = (ptr);     \
> +        (type *) ((char *) __mptr - offsetof(type, member)); })
> +#endif
> +
> +typedef struct {
> +    VugDev dev;
> +    struct itimerspec ts;
> +    timer_t rate_limit_timer;
> +    pthread_mutex_t rng_mutex;
> +    pthread_cond_t rng_cond;

I'm confused by the need for a mutex in a single-threaded application.

> +    int64_t quota_remaining;
> +    bool activate_timer;
> +    GMainLoop *loop;
> +} VuRNG;
> +
> +static gboolean print_cap, verbose;
> +static gchar *source_path, *socket_path;
> +static gint source_fd, socket_fd = -1;
> +
> +/* Defaults tailored on virtio-rng.c */
> +static uint32_t period_ms = 1 << 16;
> +static uint64_t max_bytes = INT64_MAX;
> +
> +static void check_rate_limit(union sigval sv)
> +{
> +    VuRNG *rng = sv.sival_ptr;
> +    bool wakeup = false;
> +
> +    pthread_mutex_lock(&rng->rng_mutex);
> +    /*
> +     * The timer has expired and the guest has used all available
> +     * entropy, which means function vu_rng_handle_request() is waiting
> +     * on us.  As such wake it up once we're done here.
> +     */
> +    if (rng->quota_remaining == 0) {
> +        wakeup = true;
> +    }
> +
> +    /*
> +     * Reset the entropy available to the guest and tell function
> +     * vu_rng_handle_requests() to start the timer before using it.
> +     */
> +    rng->quota_remaining = max_bytes;
> +    rng->activate_timer = true;
> +    pthread_mutex_unlock(&rng->rng_mutex);
> +
> +    if (wakeup) {
> +        pthread_cond_signal(&rng->rng_cond);
> +    }
> +}
> +
> +static void setup_timer(VuRNG *rng)
> +{
> +    struct sigevent sev;
> +    int ret;
> +
> +    memset(&rng->ts, 0, sizeof(struct itimerspec));
> +    rng->ts.it_value.tv_sec = period_ms / 1000;
> +    rng->ts.it_value.tv_nsec = (period_ms % 1000) * 1000000;
> +
> +    /*
> +     * Call function check_rate_limit() as if it was the start of
> +     * a new thread when the timer expires.
> +     */
> +    sev.sigev_notify = SIGEV_THREAD;
> +    sev.sigev_notify_function = check_rate_limit;
> +    sev.sigev_value.sival_ptr = rng;
> +    /* Needs to be NULL if defaults attributes are to be used. */
> +    sev.sigev_notify_attributes = NULL;
> +    ret = timer_create(CLOCK_MONOTONIC, &sev, &rng->rate_limit_timer);
> +    if (ret < 0) {
> +        fprintf(stderr, "timer_create() failed\n");
> +    }

Ahh I see why now. I think you could avoid this by using glib's own
g_timeout_add() function. This would then create a timer which calls
its callback periodically (for as long as the callback returns true to
persist the GSource). As the whole execution is effectively event-driven
you would avoid the need for locking.
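
Roughly (an untested sketch, reusing the names from this patch):

  static gboolean check_rate_limit(gpointer user_data)
  {
      VuRNG *rng = (VuRNG *) user_data;

      /* replenish the quota for the new time slice */
      rng->quota_remaining = max_bytes;

      /* returning TRUE keeps the GSource, so we fire again next period */
      return G_SOURCE_CONTINUE;
  }

  /* in main(), once the GMainLoop exists */
  g_timeout_add(period_ms, check_rate_limit, &rng);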

> +
> +}
> +
> +
> +/* Virtio helpers */
> +static uint64_t rng_get_features(VuDev *dev)
> +{
> +    if (verbose) {
> +        g_info("%s: replying", __func__);
> +    }
> +    return 0;
> +}
> +
> +static void rng_set_features(VuDev *dev, uint64_t features)
> +{
> +    if (verbose && features) {
> +        g_autoptr(GString) s = g_string_new("Requested un-handled feature");
> +        g_string_append_printf(s, " 0x%" PRIx64 "", features);
> +        g_info("%s: %s", __func__, s->str);
> +    }
> +}
> +
> +static void vu_rng_handle_requests(VuDev *dev, int qidx)
> +{
> +    VuRNG *rng = container_of(dev, VuRNG, dev.parent);
> +    VuVirtq *vq = vu_get_queue(dev, qidx);
> +    VuVirtqElement *elem;
> +    size_t to_read;
> +    int len, ret;
> +
> +    for (;;) {
> +        /* Get element in the vhost virtqueue */
> +        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
> +        if (!elem) {
> +            break;
> +        }
> +
> +        /* Get the amount of entropy to read from the vhost server */
> +        to_read = elem->in_sg[0].iov_len;
> +
> +        pthread_mutex_lock(&rng->rng_mutex);
> +
> +        /*
> +         * We have consumed all entropy available for this time slice.
> +         * Wait for the timer (check_rate_limit()) to tell us about the
> +         * start of a new time slice.
> +         */
> +        if (rng->quota_remaining == 0) {
> +            pthread_cond_wait(&rng->rng_cond, &rng->rng_mutex);
> +        }

Hmm, this complicates things. Ideally you wouldn't want to block here
while processing the virtqueue. This will end up blocking the guest.
I'll need to think about this.
Alex Bennée July 21, 2021, 8:14 p.m. UTC | #2
Mathieu Poirier <mathieu.poirier@linaro.org> writes:

> This patch provides the vhost-user backend implementation to work
> in tandem with the vhost-user-rng implementation of the QEMU VMM.
>
> It uses the vhost-user API so that other VMMs can re-use the interface
> without having to write the driver again.
>
> Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>

Try the following patch which creates a nested main loop and runs it
until the g_timeout_add fires again.

--8<---------------cut here---------------start------------->8---
tools/virtio/vhost-user-rng: avoid mutex by using nested main loop

As we are blocking anyway all we really need to do is run a main loop
until the timer fires and the data is consumed.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>

1 file changed, 30 insertions(+), 76 deletions(-)
tools/vhost-user-rng/main.c | 106 +++++++++++++-------------------------------

modified   tools/vhost-user-rng/main.c
@@ -42,13 +42,10 @@
 
 typedef struct {
     VugDev dev;
-    struct itimerspec ts;
-    timer_t rate_limit_timer;
-    pthread_mutex_t rng_mutex;
-    pthread_cond_t rng_cond;
     int64_t quota_remaining;
-    bool activate_timer;
+    guint timer;
     GMainLoop *loop;
+    GMainLoop *blocked;
 } VuRNG;
 
 static gboolean print_cap, verbose;
@@ -59,66 +56,26 @@ static gint source_fd, socket_fd = -1;
 static uint32_t period_ms = 1 << 16;
 static uint64_t max_bytes = INT64_MAX;
 
-static void check_rate_limit(union sigval sv)
+static gboolean check_rate_limit(gpointer user_data)
 {
-    VuRNG *rng = sv.sival_ptr;
-    bool wakeup = false;
+    VuRNG *rng = (VuRNG *) user_data;
 
-    pthread_mutex_lock(&rng->rng_mutex);
-    /*
-     * The timer has expired and the guest has used all available
-     * entropy, which means function vu_rng_handle_request() is waiting
-     * on us.  As such wake it up once we're done here.
-     */
-    if (rng->quota_remaining == 0) {
-        wakeup = true;
+    if (rng->blocked) {
+        g_info("%s: called while blocked", __func__);
+        g_main_loop_quit(rng->blocked);
     }
-
     /*
      * Reset the entropy available to the guest and tell function
      * vu_rng_handle_requests() to start the timer before using it.
      */
     rng->quota_remaining = max_bytes;
-    rng->activate_timer = true;
-    pthread_mutex_unlock(&rng->rng_mutex);
-
-    if (wakeup) {
-        pthread_cond_signal(&rng->rng_cond);
-    }
-}
-
-static void setup_timer(VuRNG *rng)
-{
-    struct sigevent sev;
-    int ret;
-
-    memset(&rng->ts, 0, sizeof(struct itimerspec));
-    rng->ts.it_value.tv_sec = period_ms / 1000;
-    rng->ts.it_value.tv_nsec = (period_ms % 1000) * 1000000;
-
-    /*
-     * Call function check_rate_limit() as if it was the start of
-     * a new thread when the timer expires.
-     */
-    sev.sigev_notify = SIGEV_THREAD;
-    sev.sigev_notify_function = check_rate_limit;
-    sev.sigev_value.sival_ptr = rng;
-    /* Needs to be NULL if defaults attributes are to be used. */
-    sev.sigev_notify_attributes = NULL;
-    ret = timer_create(CLOCK_MONOTONIC, &sev, &rng->rate_limit_timer);
-    if (ret < 0) {
-        fprintf(stderr, "timer_create() failed\n");
-    }
-
+    return true;
 }
 
-
 /* Virtio helpers */
 static uint64_t rng_get_features(VuDev *dev)
 {
-    if (verbose) {
-        g_info("%s: replying", __func__);
-    }
+    g_info("%s: replying", __func__);
     return 0;
 }
 
@@ -137,7 +94,7 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
     VuVirtq *vq = vu_get_queue(dev, qidx);
     VuVirtqElement *elem;
     size_t to_read;
-    int len, ret;
+    int len;
 
     for (;;) {
         /* Get element in the vhost virtqueue */
@@ -149,24 +106,21 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
         /* Get the amount of entropy to read from the vhost server */
         to_read = elem->in_sg[0].iov_len;
 
-        pthread_mutex_lock(&rng->rng_mutex);
-
         /*
          * We have consumed all entropy available for this time slice.
          * Wait for the timer (check_rate_limit()) to tell us about the
          * start of a new time slice.
          */
         if (rng->quota_remaining == 0) {
-            pthread_cond_wait(&rng->rng_cond, &rng->rng_mutex);
-        }
-
-        /* Start the timer if the last time slice has expired */
-        if (rng->activate_timer == true) {
-            rng->activate_timer = false;
-            ret = timer_settime(rng->rate_limit_timer, 0, &rng->ts, NULL);
-            if (ret < 0) {
-                fprintf(stderr, "timer_settime() failed\n");
-            }
+            g_assert(!rng->blocked);
+            rng->blocked = g_main_loop_new(g_main_loop_get_context(rng->loop), false);
+            g_info("attempting to consume %ld bytes but no quota left (%s)",
+                   to_read,
+                   g_main_loop_is_running(rng->loop) ? "running" : "not running");
+            g_main_loop_run(rng->blocked);
+            g_info("return from blocked loop: %ld", rng->quota_remaining);
+            g_main_loop_unref(rng->blocked);
+            rng->blocked = false;
         }
 
         /* Make sure we don't read more than it's available */
@@ -183,8 +137,6 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
 
         rng->quota_remaining -= len;
 
-        pthread_mutex_unlock(&rng->rng_mutex);
-
         vu_queue_push(dev, vq, elem, len);
         free(elem);
     }
@@ -373,6 +325,7 @@ int main(int argc, char *argv[])
      * can add it's GSource watches.
      */
     rng.loop = g_main_loop_new(NULL, FALSE);
+    rng.blocked = NULL;
 
     if (!vug_init(&rng.dev, 1, g_socket_get_fd(socket),
                   panic, &vuiface)) {
@@ -380,24 +333,25 @@ int main(int argc, char *argv[])
         exit(EXIT_FAILURE);
     }
 
-    rng.quota_remaining = max_bytes;
-    rng.activate_timer = true;
-    pthread_mutex_init(&rng.rng_mutex, NULL);
-    pthread_cond_init(&rng.rng_cond, NULL);
-    setup_timer(&rng);
-
     if (verbose) {
-        g_info("period_ms: %d tv_sec: %ld tv_nsec: %lu\n",
-               period_ms, rng.ts.it_value.tv_sec, rng.ts.it_value.tv_nsec);
+        g_log_set_handler(NULL, G_LOG_LEVEL_MASK, g_log_default_handler, NULL);
+        g_setenv("G_MESSAGES_DEBUG", "all", true);
+    } else {
+        g_log_set_handler(NULL,
+                          G_LOG_LEVEL_WARNING | G_LOG_LEVEL_CRITICAL | G_LOG_LEVEL_ERROR,
+                          g_log_default_handler, NULL);
     }
 
+    rng.quota_remaining = max_bytes;
+    rng.timer = g_timeout_add(period_ms, check_rate_limit, &rng);
+    g_info("period_ms: %"PRId32", timer %d\n", period_ms, rng.timer);
+
     g_message("entering main loop, awaiting messages");
     g_main_loop_run(rng.loop);
     g_message("finished main loop, cleaning up");
 
     g_main_loop_unref(rng.loop);
     vug_deinit(&rng.dev);
-    timer_delete(rng.rate_limit_timer);
     close(source_fd);
     unlink(socket_path);
 }
--8<---------------cut here---------------end--------------->8---
Mathieu Poirier July 22, 2021, 5:54 p.m. UTC | #3
On Wed, Jul 21, 2021 at 09:14:31PM +0100, Alex Bennée wrote:
> 
> Mathieu Poirier <mathieu.poirier@linaro.org> writes:
> 
> > This patch provides the vhost-user backend implementation to work
> > in tandem with the vhost-user-rng implementation of the QEMU VMM.
> >
> > It uses the vhost-user API so that other VMMs can re-use the interface
> > without having to write the driver again.
> >
> > Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
> 
> Try the following patch which creates a nested main loop and runs it
> until the g_timeout_add fires again.
> 
> --8<---------------cut here---------------start------------->8---
> tools/virtio/vhost-user-rng: avoid mutex by using nested main loop
> 
> As we are blocking anyway all we really need to do is run a main loop
> until the timer fires and the data is consumed.
> 

Right, I made the implementation blocking to be as close as possible to what
virtio-rng does.

I took a look at your patch below and it should do the trick.  Testing yielded
the same results as my solution so this is good.  To me the nested loop is a
little unorthodox for solving this kind of problem, but it has fewer lines of
code and avoids spinning up a new thread to deal with the timer.

I'll send another revision.

Thanks for the review,
Mathieu

> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
> 
> 1 file changed, 30 insertions(+), 76 deletions(-)
> tools/vhost-user-rng/main.c | 106 +++++++++++++-------------------------------
> 
> modified   tools/vhost-user-rng/main.c
> @@ -42,13 +42,10 @@
>  
>  typedef struct {
>      VugDev dev;
> -    struct itimerspec ts;
> -    timer_t rate_limit_timer;
> -    pthread_mutex_t rng_mutex;
> -    pthread_cond_t rng_cond;
>      int64_t quota_remaining;
> -    bool activate_timer;
> +    guint timer;
>      GMainLoop *loop;
> +    GMainLoop *blocked;
>  } VuRNG;
>  
>  static gboolean print_cap, verbose;
> @@ -59,66 +56,26 @@ static gint source_fd, socket_fd = -1;
>  static uint32_t period_ms = 1 << 16;
>  static uint64_t max_bytes = INT64_MAX;
>  
> -static void check_rate_limit(union sigval sv)
> +static gboolean check_rate_limit(gpointer user_data)
>  {
> -    VuRNG *rng = sv.sival_ptr;
> -    bool wakeup = false;
> +    VuRNG *rng = (VuRNG *) user_data;
>  
> -    pthread_mutex_lock(&rng->rng_mutex);
> -    /*
> -     * The timer has expired and the guest has used all available
> -     * entropy, which means function vu_rng_handle_request() is waiting
> -     * on us.  As such wake it up once we're done here.
> -     */
> -    if (rng->quota_remaining == 0) {
> -        wakeup = true;
> +    if (rng->blocked) {
> +        g_info("%s: called while blocked", __func__);
> +        g_main_loop_quit(rng->blocked);
>      }
> -
>      /*
>       * Reset the entropy available to the guest and tell function
>       * vu_rng_handle_requests() to start the timer before using it.
>       */
>      rng->quota_remaining = max_bytes;
> -    rng->activate_timer = true;
> -    pthread_mutex_unlock(&rng->rng_mutex);
> -
> -    if (wakeup) {
> -        pthread_cond_signal(&rng->rng_cond);
> -    }
> -}
> -
> -static void setup_timer(VuRNG *rng)
> -{
> -    struct sigevent sev;
> -    int ret;
> -
> -    memset(&rng->ts, 0, sizeof(struct itimerspec));
> -    rng->ts.it_value.tv_sec = period_ms / 1000;
> -    rng->ts.it_value.tv_nsec = (period_ms % 1000) * 1000000;
> -
> -    /*
> -     * Call function check_rate_limit() as if it was the start of
> -     * a new thread when the timer expires.
> -     */
> -    sev.sigev_notify = SIGEV_THREAD;
> -    sev.sigev_notify_function = check_rate_limit;
> -    sev.sigev_value.sival_ptr = rng;
> -    /* Needs to be NULL if defaults attributes are to be used. */
> -    sev.sigev_notify_attributes = NULL;
> -    ret = timer_create(CLOCK_MONOTONIC, &sev, &rng->rate_limit_timer);
> -    if (ret < 0) {
> -        fprintf(stderr, "timer_create() failed\n");
> -    }
> -
> +    return true;
>  }
>  
> -
>  /* Virtio helpers */
>  static uint64_t rng_get_features(VuDev *dev)
>  {
> -    if (verbose) {
> -        g_info("%s: replying", __func__);
> -    }
> +    g_info("%s: replying", __func__);
>      return 0;
>  }
>  
> @@ -137,7 +94,7 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
>      VuVirtq *vq = vu_get_queue(dev, qidx);
>      VuVirtqElement *elem;
>      size_t to_read;
> -    int len, ret;
> +    int len;
>  
>      for (;;) {
>          /* Get element in the vhost virtqueue */
> @@ -149,24 +106,21 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
>          /* Get the amount of entropy to read from the vhost server */
>          to_read = elem->in_sg[0].iov_len;
>  
> -        pthread_mutex_lock(&rng->rng_mutex);
> -
>          /*
>           * We have consumed all entropy available for this time slice.
>           * Wait for the timer (check_rate_limit()) to tell us about the
>           * start of a new time slice.
>           */
>          if (rng->quota_remaining == 0) {
> -            pthread_cond_wait(&rng->rng_cond, &rng->rng_mutex);
> -        }
> -
> -        /* Start the timer if the last time slice has expired */
> -        if (rng->activate_timer == true) {
> -            rng->activate_timer = false;
> -            ret = timer_settime(rng->rate_limit_timer, 0, &rng->ts, NULL);
> -            if (ret < 0) {
> -                fprintf(stderr, "timer_settime() failed\n");
> -            }
> +            g_assert(!rng->blocked);
> +            rng->blocked = g_main_loop_new(g_main_loop_get_context(rng->loop), false);
> +            g_info("attempting to consume %ld bytes but no quota left (%s)",
> +                   to_read,
> +                   g_main_loop_is_running(rng->loop) ? "running" : "not running");
> +            g_main_loop_run(rng->blocked);
> +            g_info("return from blocked loop: %ld", rng->quota_remaining);
> +            g_main_loop_unref(rng->blocked);
> +            rng->blocked = false;
>          }
>  
>          /* Make sure we don't read more than it's available */
> @@ -183,8 +137,6 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
>  
>          rng->quota_remaining -= len;
>  
> -        pthread_mutex_unlock(&rng->rng_mutex);
> -
>          vu_queue_push(dev, vq, elem, len);
>          free(elem);
>      }
> @@ -373,6 +325,7 @@ int main(int argc, char *argv[])
>       * can add it's GSource watches.
>       */
>      rng.loop = g_main_loop_new(NULL, FALSE);
> +    rng.blocked = NULL;
>  
>      if (!vug_init(&rng.dev, 1, g_socket_get_fd(socket),
>                    panic, &vuiface)) {
> @@ -380,24 +333,25 @@ int main(int argc, char *argv[])
>          exit(EXIT_FAILURE);
>      }
>  
> -    rng.quota_remaining = max_bytes;
> -    rng.activate_timer = true;
> -    pthread_mutex_init(&rng.rng_mutex, NULL);
> -    pthread_cond_init(&rng.rng_cond, NULL);
> -    setup_timer(&rng);
> -
>      if (verbose) {
> -        g_info("period_ms: %d tv_sec: %ld tv_nsec: %lu\n",
> -               period_ms, rng.ts.it_value.tv_sec, rng.ts.it_value.tv_nsec);
> +        g_log_set_handler(NULL, G_LOG_LEVEL_MASK, g_log_default_handler, NULL);
> +        g_setenv("G_MESSAGES_DEBUG", "all", true);
> +    } else {
> +        g_log_set_handler(NULL,
> +                          G_LOG_LEVEL_WARNING | G_LOG_LEVEL_CRITICAL | G_LOG_LEVEL_ERROR,
> +                          g_log_default_handler, NULL);
>      }
>  
> +    rng.quota_remaining = max_bytes;
> +    rng.timer = g_timeout_add(period_ms, check_rate_limit, &rng);
> +    g_info("period_ms: %"PRId32", timer %d\n", period_ms, rng.timer);
> +
>      g_message("entering main loop, awaiting messages");
>      g_main_loop_run(rng.loop);
>      g_message("finished main loop, cleaning up");
>  
>      g_main_loop_unref(rng.loop);
>      vug_deinit(&rng.dev);
> -    timer_delete(rng.rate_limit_timer);
>      close(source_fd);
>      unlink(socket_path);
>  }
> --8<---------------cut here---------------end--------------->8---
> 
> -- 
> Alex Bennée
Alex Bennée July 23, 2021, 9:01 a.m. UTC | #4
Mathieu Poirier <mathieu.poirier@linaro.org> writes:

> On Wed, Jul 21, 2021 at 09:14:31PM +0100, Alex Bennée wrote:
>> 
>> Mathieu Poirier <mathieu.poirier@linaro.org> writes:
>> 
>> > This patch provides the vhost-user backend implementation to work
>> > in tandem with the vhost-user-rng implementation of the QEMU VMM.
>> >
>> > It uses the vhost-user API so that other VMMs can re-use the interface
>> > without having to write the driver again.
>> >
>> > Signed-off-by: Mathieu Poirier <mathieu.poirier@linaro.org>
>> 
>> Try the following patch which creates a nested main loop and runs it
>> until the g_timeout_add fires again.
>> 
>> --8<---------------cut here---------------start------------->8---
>> tools/virtio/vhost-user-rng: avoid mutex by using nested main loop
>> 
>> As we are blocking anyway all we really need to do is run a main loop
>> until the timer fires and the data is consumed.
>> 
>
> Right, I made the implementation blocking to be as close as possible to what
> virtio-rng does.
>
> I took a look at your patch below and it should do the trick.  Testing yielded
> the same results as my solution so this is good.  To me the nested loop is a
> little unorthodox for solving this kind of problem, but it has fewer lines of
> code and avoids spinning up a new thread to deal with the timer.

It might be worth considering just running g_main_context_iteration()
on the main loop's context:

  https://developer.gnome.org/glib/stable/glib-The-Main-Event-Loop.html#g-main-context-iteration

and then you could avoid the hassle of having a special blocking loop
(and having to special-case the quit). However you can't be sure the
quota has been replenished, so you need to test in a loop. My hack-up
(on top of this patch):

--8<---------------cut here---------------start------------->8---
modified   tools/vhost-user-rng/main.c
@@ -45,7 +45,6 @@ typedef struct {
     int64_t quota_remaining;
     guint timer;
     GMainLoop *loop;
-    GMainLoop *blocked;
 } VuRNG;
 
 static gboolean print_cap, verbose;
@@ -59,10 +58,8 @@ static uint64_t max_bytes = INT64_MAX;
 static gboolean check_rate_limit(gpointer user_data)
 {
     VuRNG *rng = (VuRNG *) user_data;
-
-    if (rng->blocked) {
-        g_info("%s: called while blocked", __func__);
-        g_main_loop_quit(rng->blocked);
+    if (!rng->quota_remaining) {
+        g_info("%s: replenishing empty quota", __func__);
     }
     /*
      * Reset the entropy available to the guest and tell function
@@ -112,15 +109,12 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
          * start of a new time slice.
          */
         if (rng->quota_remaining == 0) {
-            g_assert(!rng->blocked);
-            rng->blocked = g_main_loop_new(g_main_loop_get_context(rng->loop), false);
-            g_info("attempting to consume %ld bytes but no quota left (%s)",
-                   to_read,
-                   g_main_loop_is_running(rng->loop) ? "running" : "not running");
-            g_main_loop_run(rng->blocked);
-            g_info("return from blocked loop: %ld", rng->quota_remaining);
-            g_main_loop_unref(rng->blocked);
-            rng->blocked = false;
+            g_info("blocking on consuming %ld bytes as no quota", to_read);
+            do {
+                g_main_context_iteration(g_main_loop_get_context(rng->loop),
+                                         true);
+                g_info("return from blocked loop: %ld", rng->quota_remaining);
+            } while (!rng->quota_remaining);
         }
 
         /* Make sure we don't read more than it's available */
@@ -325,7 +319,6 @@ int main(int argc, char *argv[])
      * can add it's GSource watches.
      */
     rng.loop = g_main_loop_new(NULL, FALSE);
-    rng.blocked = NULL;
 
     if (!vug_init(&rng.dev, 1, g_socket_get_fd(socket),
                   panic, &vuiface)) {
--8<---------------cut here---------------end--------------->8---

And then with:

  10:24 root@buster/aarch64  [~] >dd if=/dev/hwrng of=/dev/null status=progress
  77312 bytes (77 kB, 76 KiB) copied, 150 s, 0.5 kB/s

and:

  ./tools/vhost-user-rng/vhost-user-rng --socket-path vrng.sock -v -m 512 -p 1000

I was seeing:

  vhost-user-rng-INFO: 10:27:14.453: blocking on consuming 64 bytes as no quota
  vhost-user-rng-INFO: 10:27:14.453: return from blocked loop: 0
  vhost-user-rng-INFO: 10:27:15.451: check_rate_limit: replenishing empty quota
  vhost-user-rng-INFO: 10:27:15.451: return from blocked loop: 512
  vhost-user-rng-INFO: 10:27:15.457: blocking on consuming 64 bytes as no quota
  vhost-user-rng-INFO: 10:27:15.457: return from blocked loop: 0
  vhost-user-rng-INFO: 10:27:16.453: check_rate_limit: replenishing empty quota
  vhost-user-rng-INFO: 10:27:16.453: return from blocked loop: 512
  vhost-user-rng-INFO: 10:27:16.456: blocking on consuming 64 bytes as no quota
  vhost-user-rng-INFO: 10:27:16.456: return from blocked loop: 0
  vhost-user-rng-INFO: 10:27:17.454: check_rate_limit: replenishing empty quota
  vhost-user-rng-INFO: 10:27:17.454: return from blocked loop: 512
  vhost-user-rng-INFO: 10:27:17.456: blocking on consuming 64 bytes as no quota
  vhost-user-rng-INFO: 10:27:17.456: return from blocked loop: 0

which seemed reasonable enough...
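
For completeness, the guest side can be wired up along these lines (an
illustrative command line -- the vhost-user-rng-pci device comes from earlier
in this series, and vhost-user requires the guest RAM to be shared with the
backend):

  qemu-system-aarch64 ... \
    -m 4G \
    -object memory-backend-memfd,id=mem,size=4G,share=on \
    -numa node,memdev=mem \
    -chardev socket,path=vrng.sock,id=vrng0 \
    -device vhost-user-rng-pci,chardev=vrng0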

>
> I'll send another revision.
>
> Thanks for the review,
> Mathieu
>
>> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
>> 
>> 1 file changed, 30 insertions(+), 76 deletions(-)
>> tools/vhost-user-rng/main.c | 106 +++++++++++++-------------------------------
>> 
>> modified   tools/vhost-user-rng/main.c
>> @@ -42,13 +42,10 @@
>>  
>>  typedef struct {
>>      VugDev dev;
>> -    struct itimerspec ts;
>> -    timer_t rate_limit_timer;
>> -    pthread_mutex_t rng_mutex;
>> -    pthread_cond_t rng_cond;
>>      int64_t quota_remaining;
>> -    bool activate_timer;
>> +    guint timer;
>>      GMainLoop *loop;
>> +    GMainLoop *blocked;
>>  } VuRNG;
>>  
>>  static gboolean print_cap, verbose;
>> @@ -59,66 +56,26 @@ static gint source_fd, socket_fd = -1;
>>  static uint32_t period_ms = 1 << 16;
>>  static uint64_t max_bytes = INT64_MAX;
>>  
>> -static void check_rate_limit(union sigval sv)
>> +static gboolean check_rate_limit(gpointer user_data)
>>  {
>> -    VuRNG *rng = sv.sival_ptr;
>> -    bool wakeup = false;
>> +    VuRNG *rng = (VuRNG *) user_data;
>>  
>> -    pthread_mutex_lock(&rng->rng_mutex);
>> -    /*
>> -     * The timer has expired and the guest has used all available
>> -     * entropy, which means function vu_rng_handle_request() is waiting
>> -     * on us.  As such wake it up once we're done here.
>> -     */
>> -    if (rng->quota_remaining == 0) {
>> -        wakeup = true;
>> +    if (rng->blocked) {
>> +        g_info("%s: called while blocked", __func__);
>> +        g_main_loop_quit(rng->blocked);
>>      }
>> -
>>      /*
>>       * Reset the entropy available to the guest and tell function
>>       * vu_rng_handle_requests() to start the timer before using it.
>>       */
>>      rng->quota_remaining = max_bytes;
>> -    rng->activate_timer = true;
>> -    pthread_mutex_unlock(&rng->rng_mutex);
>> -
>> -    if (wakeup) {
>> -        pthread_cond_signal(&rng->rng_cond);
>> -    }
>> -}
>> -
>> -static void setup_timer(VuRNG *rng)
>> -{
>> -    struct sigevent sev;
>> -    int ret;
>> -
>> -    memset(&rng->ts, 0, sizeof(struct itimerspec));
>> -    rng->ts.it_value.tv_sec = period_ms / 1000;
>> -    rng->ts.it_value.tv_nsec = (period_ms % 1000) * 1000000;
>> -
>> -    /*
>> -     * Call function check_rate_limit() as if it was the start of
>> -     * a new thread when the timer expires.
>> -     */
>> -    sev.sigev_notify = SIGEV_THREAD;
>> -    sev.sigev_notify_function = check_rate_limit;
>> -    sev.sigev_value.sival_ptr = rng;
>> -    /* Needs to be NULL if defaults attributes are to be used. */
>> -    sev.sigev_notify_attributes = NULL;
>> -    ret = timer_create(CLOCK_MONOTONIC, &sev, &rng->rate_limit_timer);
>> -    if (ret < 0) {
>> -        fprintf(stderr, "timer_create() failed\n");
>> -    }
>> -
>> +    return true;
>>  }
>>  
>> -
>>  /* Virtio helpers */
>>  static uint64_t rng_get_features(VuDev *dev)
>>  {
>> -    if (verbose) {
>> -        g_info("%s: replying", __func__);
>> -    }
>> +    g_info("%s: replying", __func__);
>>      return 0;
>>  }
>>  
>> @@ -137,7 +94,7 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
>>      VuVirtq *vq = vu_get_queue(dev, qidx);
>>      VuVirtqElement *elem;
>>      size_t to_read;
>> -    int len, ret;
>> +    int len;
>>  
>>      for (;;) {
>>          /* Get element in the vhost virtqueue */
>> @@ -149,24 +106,21 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
>>          /* Get the amount of entropy to read from the vhost server */
>>          to_read = elem->in_sg[0].iov_len;
>>  
>> -        pthread_mutex_lock(&rng->rng_mutex);
>> -
>>          /*
>>           * We have consumed all entropy available for this time slice.
>>           * Wait for the timer (check_rate_limit()) to tell us about the
>>           * start of a new time slice.
>>           */
>>          if (rng->quota_remaining == 0) {
>> -            pthread_cond_wait(&rng->rng_cond, &rng->rng_mutex);
>> -        }
>> -
>> -        /* Start the timer if the last time slice has expired */
>> -        if (rng->activate_timer == true) {
>> -            rng->activate_timer = false;
>> -            ret = timer_settime(rng->rate_limit_timer, 0, &rng->ts, NULL);
>> -            if (ret < 0) {
>> -                fprintf(stderr, "timer_settime() failed\n");
>> -            }
>> +            g_assert(!rng->blocked);
>> +            rng->blocked = g_main_loop_new(g_main_loop_get_context(rng->loop), false);
>> +            g_info("attempting to consume %ld bytes but no quota left (%s)",
>> +                   to_read,
>> +                   g_main_loop_is_running(rng->loop) ? "running" : "not running");
>> +            g_main_loop_run(rng->blocked);
>> +            g_info("return from blocked loop: %ld", rng->quota_remaining);
>> +            g_main_loop_unref(rng->blocked);
>> +            rng->blocked = false;
>>          }
>>  
>>          /* Make sure we don't read more than it's available */
>> @@ -183,8 +137,6 @@ static void vu_rng_handle_requests(VuDev *dev, int qidx)
>>  
>>          rng->quota_remaining -= len;
>>  
>> -        pthread_mutex_unlock(&rng->rng_mutex);
>> -
>>          vu_queue_push(dev, vq, elem, len);
>>          free(elem);
>>      }
>> @@ -373,6 +325,7 @@ int main(int argc, char *argv[])
>>       * can add it's GSource watches.
>>       */
>>      rng.loop = g_main_loop_new(NULL, FALSE);
>> +    rng.blocked = NULL;
>>  
>>      if (!vug_init(&rng.dev, 1, g_socket_get_fd(socket),
>>                    panic, &vuiface)) {
>> @@ -380,24 +333,25 @@ int main(int argc, char *argv[])
>>          exit(EXIT_FAILURE);
>>      }
>>  
>> -    rng.quota_remaining = max_bytes;
>> -    rng.activate_timer = true;
>> -    pthread_mutex_init(&rng.rng_mutex, NULL);
>> -    pthread_cond_init(&rng.rng_cond, NULL);
>> -    setup_timer(&rng);
>> -
>>      if (verbose) {
>> -        g_info("period_ms: %d tv_sec: %ld tv_nsec: %lu\n",
>> -               period_ms, rng.ts.it_value.tv_sec, rng.ts.it_value.tv_nsec);
>> +        g_log_set_handler(NULL, G_LOG_LEVEL_MASK, g_log_default_handler, NULL);
>> +        g_setenv("G_MESSAGES_DEBUG", "all", true);
>> +    } else {
>> +        g_log_set_handler(NULL,
>> +                          G_LOG_LEVEL_WARNING | G_LOG_LEVEL_CRITICAL | G_LOG_LEVEL_ERROR,
>> +                          g_log_default_handler, NULL);
>>      }
>>  
>> +    rng.quota_remaining = max_bytes;
>> +    rng.timer = g_timeout_add(period_ms, check_rate_limit, &rng);
>> +    g_info("period_ms: %"PRId32", timer %d\n", period_ms, rng.timer);
>> +
>>      g_message("entering main loop, awaiting messages");
>>      g_main_loop_run(rng.loop);
>>      g_message("finished main loop, cleaning up");
>>  
>>      g_main_loop_unref(rng.loop);
>>      vug_deinit(&rng.dev);
>> -    timer_delete(rng.rate_limit_timer);
>>      close(source_fd);
>>      unlink(socket_path);
>>  }
>> --8<---------------cut here---------------end--------------->8---
>> 
>> -- 
>> Alex Bennée

Patch

diff --git a/tools/meson.build b/tools/meson.build
index 3e5a0abfa29f..66b0a11fbb45 100644
--- a/tools/meson.build
+++ b/tools/meson.build
@@ -24,3 +24,11 @@  endif
 if have_virtiofsd
   subdir('virtiofsd')
 endif
+
+have_virtiorng = (have_system and
+    have_tools and
+    'CONFIG_LINUX' in config_host)
+
+if have_virtiorng
+  subdir('vhost-user-rng')
+endif
diff --git a/tools/vhost-user-rng/50-qemu-rng.json.in b/tools/vhost-user-rng/50-qemu-rng.json.in
new file mode 100644
index 000000000000..9186c3c6fe1d
--- /dev/null
+++ b/tools/vhost-user-rng/50-qemu-rng.json.in
@@ -0,0 +1,5 @@ 
+{
+  "description": "QEMU vhost-user-rng",
+  "type": "bridge",
+  "binary": "@libexecdir@/vhost-user-rng"
+}
diff --git a/tools/vhost-user-rng/main.c b/tools/vhost-user-rng/main.c
new file mode 100644
index 000000000000..c3b8f6922757
--- /dev/null
+++ b/tools/vhost-user-rng/main.c
@@ -0,0 +1,403 @@ 
+/*
+ * VIRTIO RNG Emulation via vhost-user
+ *
+ * Copyright (c) 2021 Mathieu Poirier <mathieu.poirier@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#define G_LOG_DOMAIN "vhost-user-rng"
+#define G_LOG_USE_STRUCTURED 1
+
+#include <glib.h>
+#include <gio/gio.h>
+#include <gio/gunixsocketaddress.h>
+#include <glib-unix.h>
+#include <glib/gstdio.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <inttypes.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <time.h>
+#include <unistd.h>
+#include <endian.h>
+#include <assert.h>
+
+#include "qemu/cutils.h"
+#include "subprojects/libvhost-user/libvhost-user-glib.h"
+#include "subprojects/libvhost-user/libvhost-user.h"
+
+#ifndef container_of
+#define container_of(ptr, type, member) ({                      \
+        const typeof(((type *) 0)->member) * __mptr = (ptr);     \
+        (type *) ((char *) __mptr - offsetof(type, member)); })
+#endif
+
+typedef struct {
+    VugDev dev;
+    struct itimerspec ts;
+    timer_t rate_limit_timer;
+    pthread_mutex_t rng_mutex;
+    pthread_cond_t rng_cond;
+    int64_t quota_remaining;
+    bool activate_timer;
+    GMainLoop *loop;
+} VuRNG;
+
+static gboolean print_cap, verbose;
+static gchar *source_path, *socket_path;
+static gint source_fd, socket_fd = -1;
+
+/* Defaults tailored on virtio-rng.c */
+static uint32_t period_ms = 1 << 16;
+static uint64_t max_bytes = INT64_MAX;
+
+static void check_rate_limit(union sigval sv)
+{
+    VuRNG *rng = sv.sival_ptr;
+    bool wakeup = false;
+
+    pthread_mutex_lock(&rng->rng_mutex);
+    /*
+     * The timer has expired and the guest has used all available
+     * entropy, which means function vu_rng_handle_request() is waiting
+     * on us.  As such wake it up once we're done here.
+     */
+    if (rng->quota_remaining == 0) {
+        wakeup = true;
+    }
+
+    /*
+     * Reset the entropy available to the guest and tell function
+     * vu_rng_handle_requests() to start the timer before using it.
+     */
+    rng->quota_remaining = max_bytes;
+    rng->activate_timer = true;
+    pthread_mutex_unlock(&rng->rng_mutex);
+
+    if (wakeup) {
+        pthread_cond_signal(&rng->rng_cond);
+    }
+}
+
+static void setup_timer(VuRNG *rng)
+{
+    struct sigevent sev;
+    int ret;
+
+    memset(&rng->ts, 0, sizeof(struct itimerspec));
+    rng->ts.it_value.tv_sec = period_ms / 1000;
+    rng->ts.it_value.tv_nsec = (period_ms % 1000) * 1000000;
+
+    /*
+     * Call function check_rate_limit() as if it was the start of
+     * a new thread when the timer expires.
+     */
+    sev.sigev_notify = SIGEV_THREAD;
+    sev.sigev_notify_function = check_rate_limit;
+    sev.sigev_value.sival_ptr = rng;
+    /* Needs to be NULL if defaults attributes are to be used. */
+    sev.sigev_notify_attributes = NULL;
+    ret = timer_create(CLOCK_MONOTONIC, &sev, &rng->rate_limit_timer);
+    if (ret < 0) {
+        fprintf(stderr, "timer_create() failed\n");
+    }
+
+}
+
+
+/* Virtio helpers */
+static uint64_t rng_get_features(VuDev *dev)
+{
+    if (verbose) {
+        g_info("%s: replying", __func__);
+    }
+    return 0;
+}
+
+static void rng_set_features(VuDev *dev, uint64_t features)
+{
+    if (verbose && features) {
+        g_autoptr(GString) s = g_string_new("Requested un-handled feature");
+        g_string_append_printf(s, " 0x%" PRIx64 "", features);
+        g_info("%s: %s", __func__, s->str);
+    }
+}
+
+static void vu_rng_handle_requests(VuDev *dev, int qidx)
+{
+    VuRNG *rng = container_of(dev, VuRNG, dev.parent);
+    VuVirtq *vq = vu_get_queue(dev, qidx);
+    VuVirtqElement *elem;
+    size_t to_read;
+    int len, ret;
+
+    for (;;) {
+        /* Get element in the vhost virtqueue */
+        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
+        if (!elem) {
+            break;
+        }
+
+        /* Get the amount of entropy to read from the vhost server */
+        to_read = elem->in_sg[0].iov_len;
+
+        pthread_mutex_lock(&rng->rng_mutex);
+
+        /*
+         * We have consumed all entropy available for this time slice.
+         * Wait for the timer (check_rate_limit()) to tell us about the
+         * start of a new time slice.
+         */
+        if (rng->quota_remaining == 0) {
+            pthread_cond_wait(&rng->rng_cond, &rng->rng_mutex);
+        }
+
+        /* Start the timer if the last time slice has expired */
+        if (rng->activate_timer == true) {
+            rng->activate_timer = false;
+            ret = timer_settime(rng->rate_limit_timer, 0, &rng->ts, NULL);
+            if (ret < 0) {
+                fprintf(stderr, "timer_settime() failed\n");
+            }
+        }
+
+        /* Make sure we don't read more than it's available */
+        if (rng->quota_remaining < to_read) {
+            to_read = rng->quota_remaining;
+        }
+
+        len = read(source_fd, elem->in_sg[0].iov_base, to_read);
+
+        /* Simply return 0 if an error occurs */
+        if (len < 0) {
+            len = 0;
+        }
+
+        rng->quota_remaining -= len;
+
+        pthread_mutex_unlock(&rng->rng_mutex);
+
+        vu_queue_push(dev, vq, elem, len);
+        free(elem);
+    }
+
+    vu_queue_notify(dev, vq);
+}
+
+static void
+vu_rng_queue_set_started(VuDev *dev, int qidx, bool started)
+{
+    VuVirtq *vq = vu_get_queue(dev, qidx);
+
+    g_debug("queue started %d:%d\n", qidx, started);
+
+    if (!qidx) {
+        vu_set_queue_handler(dev, vq, started ? vu_rng_handle_requests : NULL);
+    }
+}
+
+/*
+ * Any messages not handled here are processed by the libvhost library
+ * itself.
+ */
+static int rng_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
+{
+    VuRNG *rng = container_of(dev, VuRNG, dev.parent);
+
+    if (msg->request == VHOST_USER_NONE) {
+        g_main_loop_quit(rng->loop);
+        return 1;
+    }
+
+    return 0;
+}
+
+static const VuDevIface vuiface = {
+    .set_features = rng_set_features,
+    .get_features = rng_get_features,
+    .queue_set_started = vu_rng_queue_set_started,
+    .process_msg = rng_process_msg,
+};
+
+static gboolean hangup(gpointer user_data)
+{
+    GMainLoop *loop = (GMainLoop *) user_data;
+
+    g_printerr("%s: caught hangup/quit signal, quitting", __func__);
+    g_main_loop_quit(loop);
+    return true;
+}
+
+static void panic(VuDev *dev, const char *msg)
+{
+    g_critical("%s\n", msg);
+    exit(EXIT_FAILURE);
+}
+
+/* Print vhost-user.json backend program capabilities */
+static void print_capabilities(void)
+{
+    printf("{\n");
+    printf("  \"type\": \"RNG\"\n");
+    printf("  \"filename\": [ RNG source ]\n");
+    printf("}\n");
+}
+
+static GOptionEntry options[] = {
+    { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &socket_path,
+      "Location of vhost-user Unix domain socket, incompatible with --fd",
+      "PATH" },
+    { "fd", 'f', 0, G_OPTION_ARG_INT, &socket_fd,
+      "Specify the backend file-descriptor, incompatible with --socket-path",
+      "FD" },
+    { "period", 'p', 0, G_OPTION_ARG_INT, &period_ms,
+      "Time needed (in ms) to transfer a maximum amount of byte", NULL },
+    { "max-bytes", 'm', 0, G_OPTION_ARG_INT64, &max_bytes,
+      "Maximum amount of byte that can be transferred in a period", NULL },
+    { "filename", 'n', 0, G_OPTION_ARG_FILENAME, &source_path,
+      "RNG source, defaults to /dev/urandom", "PATH" },
+    { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &print_cap,
+      "Output to stdout the backend capabilities in JSON format and exit",
+      NULL},
+    { "verbose", 'v', 0, G_OPTION_ARG_NONE, &verbose,
+      "Be more verbose in output", NULL},
+    { NULL }
+};
+
+int main(int argc, char *argv[])
+{
+    GError *error = NULL;
+    GOptionContext *context;
+    g_autoptr(GSocket) socket = NULL;
+    char default_source[] = "/dev/urandom";
+    char *source = default_source;
+    VuRNG rng;
+
+    context = g_option_context_new("vhost-user emulation of RNG device");
+    g_option_context_add_main_entries(context, options, "vhost-user-rng");
+    if (!g_option_context_parse(context, &argc, &argv, &error)) {
+        g_printerr("option parsing failed: %s\n", error->message);
+        exit(1);
+    }
+
+    if (print_cap) {
+        print_capabilities();
+        exit(0);
+    }
+
+    if (!socket_path && socket_fd < 0) {
+        g_printerr("Please specify either --fd or --socket-path\n");
+        exit(EXIT_FAILURE);
+    }
+
+    if (socket_path && socket_fd > 0) {
+        g_printerr("Either --fd or --socket-path, not both\n");
+        exit(EXIT_FAILURE);
+    }
+
+    if (max_bytes > INT64_MAX) {
+        g_printerr("'max-bytes' parameter must be non-negative, "
+                   "and less than 2^63\n");
+        exit(EXIT_FAILURE);
+    }
+
+    if (period_ms <= 0) {
+        g_printerr("'period' parameter expects a positive integer\n");
+        exit(EXIT_FAILURE);
+    }
+
+    /*
+     * Now create a vhost-user socket that we will receive messages
+     * on. Once we have our handler set up we can enter the glib main
+     * loop.
+     */
+    if (socket_path) {
+        g_autoptr(GSocketAddress) addr = g_unix_socket_address_new(socket_path);
+        g_autoptr(GSocket) bind_socket = g_socket_new(G_SOCKET_FAMILY_UNIX,
+                                                      G_SOCKET_TYPE_STREAM,
+                                                      G_SOCKET_PROTOCOL_DEFAULT,
+                                                      &error);
+
+        if (!g_socket_bind(bind_socket, addr, false, &error)) {
+            g_printerr("Failed to bind to socket at %s (%s).\n",
+                       socket_path, error->message);
+            exit(EXIT_FAILURE);
+        }
+        if (!g_socket_listen(bind_socket, &error)) {
+            g_printerr("Failed to listen on socket %s (%s).\n",
+                       socket_path, error->message);
+        }
+        g_message("awaiting connection to %s", socket_path);
+        socket = g_socket_accept(bind_socket, NULL, &error);
+        if (!socket) {
+            g_printerr("Failed to accept on socket %s (%s).\n",
+                       socket_path, error->message);
+        }
+    } else {
+        socket = g_socket_new_from_fd(socket_fd, &error);
+        if (!socket) {
+            g_printerr("Failed to connect to FD %d (%s).\n",
+                       socket_fd, error->message);
+            exit(EXIT_FAILURE);
+        }
+    }
+
+    /* Overwrite default RNG source with what user provided, if any */
+    if (source_path) {
+        source = source_path;
+    }
+
+    source_fd = open(source, O_RDWR);
+    if (source_fd < 0) {
+        g_printerr("Failed to open RNG source %s\n", source);
+        g_socket_close(socket, &error);
+        unlink(socket_path);
+        exit(EXIT_FAILURE);
+    }
+
+    /* catch exit signals */
+    g_unix_signal_add(SIGHUP, hangup, rng.loop);
+    g_unix_signal_add(SIGINT, hangup, rng.loop);
+
+    /*
+     * Create the main loop first so all the various sources can be
+     * added. As well as catching signals we need to ensure vug_init
+     * can add it's GSource watches.
+     */
+    rng.loop = g_main_loop_new(NULL, FALSE);
+
+    if (!vug_init(&rng.dev, 1, g_socket_get_fd(socket),
+                  panic, &vuiface)) {
+        g_printerr("Failed to initialize libvhost-user-glib.\n");
+        exit(EXIT_FAILURE);
+    }
+
+    rng.quota_remaining = max_bytes;
+    rng.activate_timer = true;
+    pthread_mutex_init(&rng.rng_mutex, NULL);
+    pthread_cond_init(&rng.rng_cond, NULL);
+    setup_timer(&rng);
+
+    if (verbose) {
+        g_info("period_ms: %d tv_sec: %ld tv_nsec: %lu\n",
+               period_ms, rng.ts.it_value.tv_sec, rng.ts.it_value.tv_nsec);
+    }
+
+    g_message("entering main loop, awaiting messages");
+    g_main_loop_run(rng.loop);
+    g_message("finished main loop, cleaning up");
+
+    g_main_loop_unref(rng.loop);
+    vug_deinit(&rng.dev);
+    timer_delete(rng.rate_limit_timer);
+    close(source_fd);
+    unlink(socket_path);
+}
diff --git a/tools/vhost-user-rng/meson.build b/tools/vhost-user-rng/meson.build
new file mode 100644
index 000000000000..4dc386daf335
--- /dev/null
+++ b/tools/vhost-user-rng/meson.build
@@ -0,0 +1,10 @@ 
+executable('vhost-user-rng', files(
+  'main.c'),
+  dependencies: [qemuutil, glib, gio, rt],
+  install: true,
+  install_dir: get_option('libexecdir'))
+
+configure_file(input: '50-qemu-rng.json.in',
+               output: '50-qemu-rng.json',
+               configuration: config_host,
+               install_dir: qemu_datadir / 'vhost-user')