@@ -24,6 +24,7 @@
#include "qemu/main-loop.h"
#include "qemu/uuid.h"
#include "qemu/sockets.h"
+#include "qemu/lockable.h"
#include "sysemu/runstate.h"
#include "sysemu/cryptodev.h"
#include "migration/postcopy-ram.h"
@@ -446,6 +447,10 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
.hdr.size = sizeof(msg.payload.log),
};
 
+ struct vhost_user *u = dev->opaque;
+ struct VhostUserState *us = u->user;
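+    /* Serialize this request-reply cycle with others on the same channel. */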
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
/* Send only once with first queue pair */
if (dev->vq_index != 0) {
return 0;
@@ -903,6 +914,9 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
.hdr.flags = VHOST_USER_VERSION,
};
 
+ struct VhostUserState *us = u->user;
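+    /*
+     * The guard below spans the whole memory-table update, including
+     * the request-reply cycles that send_remove_regions() and
+     * send_add_regions() perform on our behalf via
+     * vhost_user_add_remove_regions(); those helpers run under this
+     * lock and must not try to retake it.
+     */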
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
if (u->region_rb_len < dev->mem->nregions) {
u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
@@ -1028,6 +1042,9 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
.hdr.flags = VHOST_USER_VERSION,
};
 
+ struct VhostUserState *us = u->user;
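+    /* Also spans the send_remove_regions()/send_add_regions() cycles. */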
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
if (reply_supported) {
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
@@ -1089,6 +1106,10 @@ static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
return 0;
}
 
+ struct vhost_user *u = dev->opaque;
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
return ret;
@@ -1138,6 +1159,10 @@ static int vhost_user_write_sync(struct vhost_dev *dev, VhostUserMsg *msg,
}
}
 
+ struct vhost_user *u = dev->opaque;
+ struct VhostUserState *us = u->user;
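+    /* Held across the write and any wait for the backend's reply or ack. */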
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, msg, NULL, 0);
if (ret < 0) {
return ret;
@@ -1277,6 +1302,8 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev,
.hdr.size = sizeof(msg.payload.state),
};
struct vhost_user *u = dev->opaque;
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
 
VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
if (n) {
@@ -1669,6 +1696,9 @@ int vhost_user_get_shared_object(struct vhost_dev *dev, unsigned char *uuid,
};
memcpy(msg.payload.object.uuid, uuid, sizeof(msg.payload.object.uuid));
 
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
return ret;
@@ -1889,6 +1919,9 @@ static int vhost_setup_backend_channel(struct vhost_dev *dev)
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
 
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, &sv[1], 1);
if (ret) {
goto out;
@@ -1993,6 +2026,9 @@ static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
.hdr.flags = VHOST_USER_VERSION,
};
 
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
error_setg(errp, "Failed to send postcopy_advise to vhost");
@@ -2051,6 +2087,9 @@ static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
 
trace_vhost_user_postcopy_listen();
 
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
error_setg(errp, "Failed to send postcopy_listen to vhost");
@@ -2080,6 +2119,9 @@ static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
 
trace_vhost_user_postcopy_end_entry();
 
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
error_setg(errp, "Failed to send postcopy_end to vhost");
@@ -2372,6 +2414,10 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
}
 
+ struct vhost_user *u = dev->opaque;
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
return ret;
@@ -2396,6 +2442,10 @@ static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
.payload.iotlb = *imsg,
};
 
+ struct vhost_user *u = dev->opaque;
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
return ret;
@@ -2428,6 +2478,10 @@ static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
 
assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);
 
+ struct vhost_user *u = dev->opaque;
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
msg.payload.config.offset = 0;
msg.payload.config.size = config_len;
ret = vhost_user_write(dev, &msg, NULL, 0);
@@ -2492,6 +2546,10 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
p = msg.payload.config.region;
memcpy(p, data, size);
 
+ struct vhost_user *u = dev->opaque;
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
return ret;
@@ -2570,6 +2628,10 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev,
}
}
 
+ struct vhost_user *u = dev->opaque;
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
msg.payload.session.op_code = backend_info->op_code;
msg.payload.session.session_id = backend_info->session_id;
ret = vhost_user_write(dev, &msg, NULL, 0);
@@ -2662,6 +2724,9 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
return 0;
}
 
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
return ret;
@@ -2757,6 +2822,7 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
user->memory_slots = 0;
user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
&vhost_user_state_destroy);
+ qemu_mutex_init(&user->vhost_user_request_reply_lock);
return true;
}
 
@@ -2769,6 +2835,7 @@ void vhost_user_cleanup(VhostUserState *user)
user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
memory_region_transaction_commit();
user->chr = NULL;
+ qemu_mutex_destroy(&user->vhost_user_request_reply_lock);
}
 
@@ -2902,6 +2969,9 @@ static int vhost_user_set_device_state_fd(struct vhost_dev *dev,
return -ENOTSUP;
}
 
+ struct VhostUserState *us = vu->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, &fd, 1);
close(fd);
if (ret < 0) {
@@ -2965,6 +3035,10 @@ static int vhost_user_check_device_state(struct vhost_dev *dev, Error **errp)
return -ENOTSUP;
}
 
+ struct vhost_user *u = dev->opaque;
+ struct VhostUserState *us = u->user;
+ QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);
+
ret = vhost_user_write(dev, &msg, NULL, 0);
if (ret < 0) {
error_setg_errno(errp, -ret,
--- a/include/hw/virtio/vhost-user.h
+++ b/include/hw/virtio/vhost-user.h
@@ -67,6 +67,9 @@ typedef struct VhostUserState {
GPtrArray *notifiers;
int memory_slots;
bool supports_config;
+
+    /*
+     * Serializes one request-reply cycle on the backend channel: taken
+     * before a request is written and released only after its reply has
+     * been read, so concurrent contexts cannot interleave their messages.
+     */
+    QemuMutex vhost_user_request_reply_lock;
} VhostUserState;
 
/**
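
A minimal sketch of the request-reply pattern that the new lock serializes, for illustration only (not part of the patch): vhost_user_write() and vhost_user_read() are the file's existing helpers, while the wrapper function and its caller-supplied msg are hypothetical.

    static int vhost_user_example_request(struct vhost_dev *dev,
                                          VhostUserMsg *msg)
    {
        struct vhost_user *u = dev->opaque;
        struct VhostUserState *us = u->user;
        int ret;

        /*
         * Taken before the request is written and released by the guard
         * only after the reply has been read, so no other context can
         * slip its own request-reply cycle in between.
         */
        QEMU_LOCK_GUARD(&us->vhost_user_request_reply_lock);

        ret = vhost_user_write(dev, msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
        return vhost_user_read(dev, msg);
    }

Since the mutex lives in VhostUserState, its scope is one backend connection (one CharBackend): devices on separate vhost-user sockets do not contend on it.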