diff mbox

[18/18] test: add shutdown support to vubr test

Message ID 1459509388-6185-19-git-send-email-marcandre.lureau@redhat.com (mailing list archive)
State New, archived
Headers show

Commit Message

Marc-André Lureau April 1, 2016, 11:16 a.m. UTC
From: Marc-André Lureau <marcandre.lureau@redhat.com>

The bridge can now be interrupted with ctrl-c. Once the slave channel is
up, it will request a shutdown and wait for a success reply before exiting.

Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com>
---
 tests/vhost-user-bridge.c | 102 ++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 99 insertions(+), 3 deletions(-)

Comments

Yuanhan Liu April 13, 2016, 2:52 a.m. UTC | #1
On Fri, Apr 01, 2016 at 01:16:28PM +0200, marcandre.lureau@redhat.com wrote:
> +static void
> +vubr_handle_slave_reply(VhostUserMsg *vmsg)
> +{
> +    DPRINT(
> +        "==================   Vhost slave reply from QEMU   ==================\n");
> +    DPRINT("Request: %s (%d)\n", vubr_slave_request_str[vmsg->request],
> +           vmsg->request);
> +    DPRINT("Flags:   0x%x\n", vmsg->flags);
> +    DPRINT("Size:    %d\n", vmsg->size);
> +
> +    switch (vmsg->request) {
> +    case VHOST_USER_SLAVE_SHUTDOWN:
> +        DPRINT("Shutdown success: 0x%016"PRIx64"\n", vmsg->payload.u64);
> +        if (vmsg->payload.u64 == 0) {
> +            exit(0);
> +        }
> +    default:
> +        DPRINT("Invalid slave reply");
> +    };
       ^^

Minor nit: redundant ';'.

	--yliu
diff mbox

Patch

diff --git a/tests/vhost-user-bridge.c b/tests/vhost-user-bridge.c
index 42450a6..ea123be 100644
--- a/tests/vhost-user-bridge.c
+++ b/tests/vhost-user-bridge.c
@@ -135,6 +135,9 @@  dispatcher_wait(Dispatcher *dispr, uint32_t timeout)
     int rc = select(dispr->max_sock + 1, &fdset, 0, 0, &tv);
 
     if (rc == -1) {
+        if (errno == EINTR) {
+            return 0;
+        }
         vubr_die("select");
     }
 
@@ -186,6 +189,7 @@  enum VhostUserProtocolFeature {
     VHOST_USER_PROTOCOL_F_MQ = 0,
     VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
     VHOST_USER_PROTOCOL_F_RARP = 2,
+    VHOST_USER_PROTOCOL_F_SLAVE_CHANNEL = 3,
 
     VHOST_USER_PROTOCOL_F_MAX
 };
@@ -213,9 +217,16 @@  typedef enum VhostUserRequest {
     VHOST_USER_GET_QUEUE_NUM = 17,
     VHOST_USER_SET_VRING_ENABLE = 18,
     VHOST_USER_SEND_RARP = 19,
+    VHOST_USER_SET_SLAVE_FD = 20,
     VHOST_USER_MAX
 } VhostUserRequest;
 
+typedef enum VhostUserSlaveRequest {
+    VHOST_USER_SLAVE_NONE = 0,
+    VHOST_USER_SLAVE_SHUTDOWN = 1,
+    VHOST_USER_SLAVE_MAX
+} VhostUserSlaveRequest;
+
 typedef struct VhostUserMemoryRegion {
     uint64_t guest_phys_addr;
     uint64_t memory_size;
@@ -288,6 +299,8 @@  typedef struct VubrDev {
     int ready;
     uint64_t features;
     int hdrlen;
+    int slave_fd;
+    bool shutdown_requested;
 } VubrDev;
 
 static const char *vubr_request_str[] = {
@@ -311,7 +324,14 @@  static const char *vubr_request_str[] = {
     [VHOST_USER_GET_QUEUE_NUM]          =  "VHOST_USER_GET_QUEUE_NUM",
     [VHOST_USER_SET_VRING_ENABLE]       =  "VHOST_USER_SET_VRING_ENABLE",
     [VHOST_USER_SEND_RARP]              =  "VHOST_USER_SEND_RARP",
-    [VHOST_USER_MAX]                    =  "VHOST_USER_MAX",
+    [VHOST_USER_SET_SLAVE_FD]           =  "VHOST_USER_SET_SLAVE_FD",
+    [VHOST_USER_MAX]                    =  "VHOST_USER_MAX"
+};
+
+/* Printable names for VhostUserSlaveRequest values, indexed by request id;
+ * used when dumping slave-channel replies. */
+static const char *vubr_slave_request_str[] = {
+    [VHOST_USER_SLAVE_NONE]             =  "VHOST_USER_SLAVE_NONE",
+    [VHOST_USER_SLAVE_SHUTDOWN]         =  "VHOST_USER_SLAVE_SHUTDOWN",
+    [VHOST_USER_SLAVE_MAX]              =  "VHOST_USER_SLAVE_MAX"
+};
 
 static void
@@ -638,7 +658,7 @@  vubr_process_desc(VubrDev *dev, VubrVirtq *vq)
     size_t buf_size = 4096;
     uint8_t buf[4096];
 
-    DPRINT("Chunks: ");
+    DPRINT("Chunks: aidx:%d   ", a_index);
     i = d_index;
     do {
         void *chunk_start = (void *)(uintptr_t)gpa_to_va(dev, desc[i].addr);
@@ -1063,7 +1083,9 @@  vubr_set_vring_err_exec(VubrDev *dev, VhostUserMsg *vmsg)
 static int
 vubr_get_protocol_features_exec(VubrDev *dev, VhostUserMsg *vmsg)
 {
-    vmsg->payload.u64 = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD;
+    vmsg->payload.u64 =
+        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
+        1ULL << VHOST_USER_PROTOCOL_F_SLAVE_CHANNEL;
     DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
     vmsg->size = sizeof(vmsg->payload.u64);
 
@@ -1105,6 +1127,46 @@  vubr_send_rarp_exec(VubrDev *dev, VhostUserMsg *vmsg)
     return 0;
 }
 
+/* Handle one reply received on the slave channel.  On a successful
+ * VHOST_USER_SLAVE_SHUTDOWN reply (payload == 0) the bridge exits. */
+static void
+vubr_handle_slave_reply(VhostUserMsg *vmsg)
+{
+    DPRINT(
+        "==================   Vhost slave reply from QEMU   ==================\n");
+    DPRINT("Request: %s (%d)\n", vubr_slave_request_str[vmsg->request],
+           vmsg->request);
+    DPRINT("Flags:   0x%x\n", vmsg->flags);
+    DPRINT("Size:    %d\n", vmsg->size);
+
+    switch (vmsg->request) {
+    case VHOST_USER_SLAVE_SHUTDOWN:
+        DPRINT("Shutdown success: 0x%016"PRIx64"\n", vmsg->payload.u64);
+        if (vmsg->payload.u64 == 0) {
+            /* Master acknowledged the shutdown: we are done. */
+            exit(0);
+        }
+        /* Non-zero payload: master refused the shutdown request.
+         * Without this break the shutdown case fell through to the
+         * "Invalid slave reply" message. */
+        break;
+    default:
+        DPRINT("Invalid slave reply\n");
+        break;
+    }
+}
+
+/* Dispatcher callback for the slave-channel socket: read one message
+ * from QEMU and process it as a reply.  The ctx argument (the VubrDev)
+ * is unused here.  NOTE(review): the return value of vubr_message_read
+ * is not checked — confirm it dies internally on error. */
+static void
+slave_receive_cb(int sock, void *ctx)
+{
+    VhostUserMsg vmsg;
+
+    vubr_message_read(sock, &vmsg);
+    vubr_handle_slave_reply(&vmsg);
+}
+
+/* VHOST_USER_SET_SLAVE_FD handler: QEMU passes the slave-channel socket
+ * as an ancillary fd.  Exactly one fd is expected with this request.
+ * The fd is registered with the dispatcher so replies (e.g. the
+ * shutdown acknowledgement) are delivered to slave_receive_cb. */
+static int
+vubr_set_slave_fd_exec(VubrDev *dev, VhostUserMsg *vmsg)
+{
+    assert(vmsg->fd_num == 1);
+    dev->slave_fd = vmsg->fds[0];
+    DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);
+    dispatcher_add(&dev->dispatcher, dev->slave_fd, dev, slave_receive_cb);
+    return 0;
+}
+
 static int
 vubr_execute_request(VubrDev *dev, VhostUserMsg *vmsg)
 {
@@ -1166,6 +1228,8 @@  vubr_execute_request(VubrDev *dev, VhostUserMsg *vmsg)
         return vubr_set_vring_enable_exec(dev, vmsg);
     case VHOST_USER_SEND_RARP:
         return vubr_send_rarp_exec(dev, vmsg);
+    case VHOST_USER_SET_SLAVE_FD:
+        return vubr_set_slave_fd_exec(dev, vmsg);
 
     case VHOST_USER_MAX:
         assert(vmsg->request != VHOST_USER_MAX);
@@ -1226,6 +1290,7 @@  vubr_new(const char *path)
         };
     }
 
+    dev->slave_fd = -1;
     /* Init log */
     dev->log_call_fd = -1;
     dev->log_size = 0;
@@ -1333,12 +1398,42 @@  vubr_backend_udp_setup(VubrDev *dev,
 }
 
 static void
+vubr_request_shutdown(VubrDev *dev)
+{
+    VhostUserMsg vmsg = {
+        .request = VHOST_USER_SLAVE_SHUTDOWN,
+        .flags = VHOST_USER_VERSION
+    };
+
+    /* Ask the master (QEMU) to initiate an orderly shutdown.  The reply
+     * arrives asynchronously on the slave channel and is handled by
+     * vubr_handle_slave_reply, which exits on success. */
+    DPRINT("requesting shutdown\n");
+    vubr_message_write(dev->slave_fd, &vmsg);
+}
+
+/* Set by the SIGINT handler, polled by vubr_run.  Per C11 7.14.1.1 only
+ * volatile sig_atomic_t may be portably written from a signal handler;
+ * plain volatile int is undefined behavior. */
+static volatile sig_atomic_t interrupted;
+
+static void interrupt_handler(int dummy)
+{
+    (void)dummy;    /* signal number, unused */
+    /* Only set a flag: most functions are not async-signal-safe. */
+    interrupted = 1;
+}
+
+static void
 vubr_run(VubrDev *dev)
 {
     while (1) {
         /* timeout 200ms */
         dispatcher_wait(&dev->dispatcher, 200000);
         /* Here one can try polling strategy. */
+
+        if (interrupted) {
+            /* No slave channel yet: nothing to tell the master, just
+             * leave the loop. */
+            if (dev->slave_fd == -1) {
+                return;
+            }
+
+            /* Request shutdown only once, then keep dispatching until
+             * the master's success reply makes the slave-reply handler
+             * call exit(0). */
+            if (!dev->shutdown_requested) {
+                vubr_request_shutdown(dev);
+                dev->shutdown_requested = 1;
+            }
+        }
     }
 }
 
@@ -1405,6 +1500,7 @@  main(int argc, char *argv[])
     }
 
     vubr_backend_udp_setup(dev, lhost, lport, rhost, rport);
+    signal(SIGINT, interrupt_handler);
     vubr_run(dev);
     return 0;